/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%sp",  "%r14", "%pc",
    "%q0",  "%q1",  "%q2",  "%q3",  "%q4",  "%q5",  "%q6",  "%q7",
    "%q8",  "%q9",  "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,

    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped. */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_R0 + slot;
}

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15
#ifndef CONFIG_SOFTMMU
#define TCG_REG_GUEST_BASE  TCG_REG_R11
#endif

typedef enum {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
} ARMCond;

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_B         = 0x0a000000,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDMIA     = 0x08b00000,
    INSN_STMDB     = 0x09200000,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}
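
/*
 * Illustrative arithmetic (example added for clarity, not part of the
 * original source): with the branch word at rx address 0x1000 and the
 * target at 0x2010, ARM reads pc as insn address + 8, so the encoded
 * word offset is (0x2010 - 0x1000 - 8) >> 2 = 0x402, which fits the
 * signed 24-bit field; the CPU then computes
 * 0x1000 + 8 + (0x402 << 2) = 0x2010.
 */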

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);

    if (imm12 >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, imm12);
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r3 will be overwritten when reading the tlb entry (softmmu only);
 * r14 will be overwritten by the BLNE branching to the slow path.
 */
#ifdef CONFIG_SOFTMMU
#define ALL_QLDST_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
                          (1 << TCG_REG_R14)))
#else
#define ALL_QLDST_REGS   (ALL_GENERAL_REGS & ~(1 << TCG_REG_R14))
#endif

/*
 * ARM immediates for ALU instructions are made of an unsigned 8-bit
 * value, right-rotated by an even amount between 0 and 30.
 *
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
 */
static int encode_imm(uint32_t imm)
{
    uint32_t rot, imm8;

    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {
        return imm;
    }

    /* Next, try a simple even shift.  */
    rot = ctz32(imm) & ~1;
    imm8 = imm >> rot;
    rot = 32 - rot;
    if ((imm8 & ~0xff) == 0) {
        goto found;
    }

    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {
            goto found;
        }
    }
    /* Fail: imm cannot be encoded. */
    return -1;

 found:
    /* Note that rot is even, and we discard bit 0 by shifting by 7. */
    return rot << 7 | imm8;
}
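
/*
 * Worked examples (illustrative only, not part of the original source):
 * 0xff000000 has ctz 24, so imm8 = 0xff with a right-rotation of
 * 32 - 24 = 8, giving imm12 = (8 << 7) | 0xff = 0x4ff.  Likewise 0x3fc
 * encodes as imm8 = 0xff rotated right by 30, imm12 = (30 << 7) | 0xff
 * = 0xfff.  A value such as 0x101 has no encoding and reaches the
 * return of -1.
 */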

static int encode_imm_nofail(uint32_t imm)
{
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);
    return ret;
}

static bool check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifting ones immediate.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}
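
/*
 * Illustrative encodings (examples added for clarity, not part of the
 * original source): 0x00ff0000 is a shifted immediate with cmode 0x4
 * and imm8 0xff (byte 2 of the word), while 0x0000ffff is a
 * shifting-ones immediate with cmode 0xc and imm8 0xff, decoded as
 * (imm8 << 8) | 0xff.
 */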

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}
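
/*
 * Example (illustrative only, not part of the original source): for
 * v32 = 0x00ff00ff the loop first masks byte 3 with no luck, then
 * masks byte 2, leaving 0x000000ff, a valid MOVI (cmode 0x0,
 * imm8 0xff).  The return value 4 selects the ORR cmode for byte 2,
 * whose imm8 0xff is extracted from v32, reconstructing the constant
 * as MOVI + ORR.
 */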

/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn.  */
        g_assert_not_reached();
    }

    return 0;
}

static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | INSN_B |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, TCGReg rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    /*
     * Unless the C portion of QEMU is compiled as thumb, we don't need
     * true BX semantics; merely a branch to an address held in a register.
     */
    tcg_out_bx_reg(s, cond, rn);
}

static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
                          TCGReg rn, uint16_t mask)
{
    tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
                             TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static void __attribute__((unused))
tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static void __attribute__((unused))
tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
{
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    imm12 = encode_imm(arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
        return;
    }
    imm12 = encode_imm(~arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        imm12 = encode_imm(diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
            return;
        }
    } else {
        imm12 = encode_imm(-diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        int rot;

        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd,  0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}
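
/*
 * Example of the two-insn path above (illustrative only, not part of
 * the original source): 0x00ff00ff has 16 bits set, so MOV is kept;
 * sh1 = 0 strips the low byte and sh2 = 16 strips the next, leaving
 * zero.  The emitted sequence is "mov rd, #0xff" followed by
 * "eor rd, rd, #0x00ff0000" (imm12 0x8ff, i.e. 0xff rotated right
 * by 16).
 */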

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
 */
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint.
 */
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(~rhs);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIN" constraint.
 */
static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(-rhs);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
                          TCGReg rn, TCGReg rm)
{
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_udiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxtb */
    tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff);
}

static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxth */
    tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    /* uxth */
    tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                            TCGReg rd, TCGReg rn, int flags)
{
    if (flags & TCG_BSWAP_OS) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        return;
    }

    /* rev16 */
    tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
    }
}

static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* rev */
    tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGReg rn, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
                             TCGReg rn, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st16(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st8(TCGContext *s, ARMCond cond,
                        TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}

/*
 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which
 * does need it.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);
        return;
    }

    /* LDR is interworking from v5t. */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/*
 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (arm_mode) {
            tcg_out_bl_imm(s, COND_AL, disp);
        } else {
            tcg_out_blx_imm(s, disp);
        }
        return;
    }

    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, addr);
}

static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);
    }
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}
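
/*
 * Sketch of the signed double-word case above (illustrative only, not
 * part of the original source): for TCG_COND_LT the emitted sequence
 * "cmp al, bl; sbcs tmp, ah, bh" leaves N and V describing the full
 * signed 64-bit subtraction a - b, so the ordinary GE/LT condition
 * codes then apply to the 64-bit comparison.
 */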

/*
 * Note that TCGReg references Q-registers.
 * D-regno = 2 * Q-regno, so shift left by 1 while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
{
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
}

static uint32_t encode_vn(TCGReg rn)
{
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
}

static uint32_t encode_vm(TCGReg rm)
{
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
}
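
/*
 * Example (illustrative only, not part of the original source): Q9
 * maps to D18, a 5-bit register number 0b10010.  encode_vd() places
 * bits [2:0] of the Q number into Vd[3:1] (insn bits 15:13) and bit 3
 * into the D bit (bit 22), which is exactly {D:Vd} = 0b10010 with
 * Vd[0] clear.
 */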

static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg m)
{
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
              encode_vd(d) | encode_vm(m));
}

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
{
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
              encode_vd(d) | encode_vn(n) | encode_vm(m));
}

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
{
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
              | (cmode << 8) | extract32(imm8, 0, 4)
              | (extract32(imm8, 4, 3) << 16)
              | (extract32(imm8, 7, 1) << 24));
}

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
{
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
              (extract32(l_imm6, 6, 1) << 7) |
              (extract32(l_imm6, 0, 6) << 16));
}

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
{
    if (offset != 0) {
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
        }
        rn = TCG_REG_TMP;
    }
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
}

typedef struct {
    ARMCond cond;
    TCGReg base;
    int index;
    bool index_scratch;
    TCGAtomAlign aa;
} HostAddress;
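
/*
 * Descriptive note (added for clarity, not part of the original
 * source): a negative index means no index register, so the fast path
 * uses the immediate-offset form of the load/store; index_scratch
 * marks the index as a scratch whose value may be clobbered, which the
 * 64-bit paths exploit for writeback addressing.
 */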

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
{
    /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */
    return TCG_REG_R14;
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ra_gen = ldst_ra_gen,
    .ntmp = 1,
    .tmp = { TCG_REG_TMP },
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
    tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOp opc = get_memop(lb->oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, lb, &ldst_helper_param);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
    return true;
}

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
#define MIN_TLB_MASK_TABLE_OFS  -256

static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addrlo, TCGReg addrhi,
                                           MemOpIdx oi, bool is_ld)
{
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    unsigned a_mask;

#ifdef CONFIG_SOFTMMU
    *h = (HostAddress){
        .cond = COND_AL,
        .base = addrlo,
        .index = TCG_REG_R1,
        .index_scratch = true,
    };
#else
    *h = (HostAddress){
        .cond = COND_AL,
        .base = addrlo,
        .index = guest_base ? TCG_REG_GUEST_BASE : -1,
        .index_scratch = false,
    };
#endif

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_mask = (1 << h->aa.align) - 1;

#ifdef CONFIG_SOFTMMU
    int mem_index = get_mmuidx(oi);
    int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
                        : offsetof(CPUTLBEntry, addr_write);
    int fast_off = tlb_mask_table_ofs(s, mem_index);
    unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
    TCGReg t_addr;

    ldst = new_ldst_label(s);
    ldst->is_ld = is_ld;
    ldst->oi = oi;
    ldst->addrlo_reg = addrlo;
    ldst->addrhi_reg = addrhi;

    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}.  */
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
    QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
    tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

    /* Extract the tlb index from the address into R0.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                    SHIFT_IMM_LSR(s->page_bits - CPU_TLB_ENTRY_BITS));

    /*
     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
     */
    QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
    if (cmp_off == 0) {
        if (s->addr_type == TCG_TYPE_I32) {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        } else {
            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        }
    } else {
        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
        if (s->addr_type == TCG_TYPE_I32) {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        } else {
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        }
    }

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                    offsetof(CPUTLBEntry, addend));

    /*
     * Check alignment, check comparators.
     * Do this in 2-4 insns.  Use MOVW for v7, if possible,
     * to reduce the number of sequential conditional instructions.
     * Almost all guests have at least 4k pages, which means we need to
     * clear at least 9 bits even for an 8-byte memory access, so it
     * isn't worth checking for an immediate operand for BIC.
     *
     * For unaligned accesses, test the page of the last unit of alignment.
     * This leaves the least significant alignment bits unchanged, and of
     * course must be zero.
     */
    t_addr = addrlo;
    if (a_mask < s_mask) {
        t_addr = TCG_REG_R0;
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
                        addrlo, s_mask - a_mask);
    }
    if (use_armv7_instructions && s->page_bits <= 16) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(s->page_mask | a_mask));
        tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                        t_addr, TCG_REG_TMP, 0);
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
    } else {
        if (a_mask) {
            tcg_debug_assert(a_mask <= 0xff);
            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
        }
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
                        SHIFT_IMM_LSR(s->page_bits));
        tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
                        0, TCG_REG_R2, TCG_REG_TMP,
                        SHIFT_IMM_LSL(s->page_bits));
    }

    if (s->addr_type != TCG_TYPE_I32) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
    }
#else
    if (a_mask) {
        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addrlo;
        ldst->addrhi_reg = addrhi;

        /* We are expecting alignment to max out at 7 */
        tcg_debug_assert(a_mask <= 0xff);
        /* tst addr, #mask */
        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
    }
#endif

    return ldst;
}

static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, HostAddress h)
{
    TCGReg base;

    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        if (h.index < 0) {
            tcg_out_ld8_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_SB:
        if (h.index < 0) {
            tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UW:
        if (h.index < 0) {
            tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_SW:
        if (h.index < 0) {
            tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UL:
        if (h.index < 0) {
            tcg_out_ld32_12(s, h.cond, datalo, h.base, 0);
        } else {
            tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index);
        }
        break;
    case MO_UQ:
        /* We used pair allocation for datalo, so it should already
           be aligned. */
        tcg_debug_assert((datalo & 1) == 0);
        tcg_debug_assert(datahi == datalo + 1);
        /* LDRD requires alignment; double-check that. */
        if (get_alignment_bits(opc) >= MO_64) {
            if (h.index < 0) {
                tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
                break;
            }
            /*
             * Rm (the second address op) must not overlap Rt or Rt + 1.
             * Since datalo is aligned, we can simplify the test via alignment.
             * Flip the two address arguments if that works.
             */
            if ((h.index & ~1) != datalo) {
                tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index);
                break;
            }
            if ((h.base & ~1) != datalo) {
                tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base);
                break;
            }
        }
        if (h.index < 0) {
            base = h.base;
            if (datalo == h.base) {
                tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base);
                base = TCG_REG_TMP;
            }
        } else if (h.index_scratch) {
            tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base);
            tcg_out_ld32_12(s, h.cond, datahi, h.index, 4);
            break;
        } else {
            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
                            h.base, h.index, SHIFT_IMM_LSL(0));
            base = TCG_REG_TMP;
        }
        tcg_out_ld32_12(s, h.cond, datalo, base, 0);
        tcg_out_ld32_12(s, h.cond, datahi, base, 4);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
                            TCGReg addrlo, TCGReg addrhi,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp opc = get_memop(oi);
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = datalo;
        ldst->datahi_reg = datahi;

        /*
         * This is a conditional BL, used only to load a pointer within
         * this opcode into LR for the slow path.  We will not be using
         * the value for a tail call.
         */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_bl_imm(s, COND_NE, 0);

        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
    }
}
1630
1631static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
1632                                   TCGReg datahi, HostAddress h)
1633{
1634    /* Byte swapping is left to middle-end expansion. */
1635    tcg_debug_assert((opc & MO_BSWAP) == 0);
1636
1637    switch (opc & MO_SIZE) {
1638    case MO_8:
1639        if (h.index < 0) {
1640            tcg_out_st8_12(s, h.cond, datalo, h.base, 0);
1641        } else {
1642            tcg_out_st8_r(s, h.cond, datalo, h.base, h.index);
1643        }
1644        break;
1645    case MO_16:
1646        if (h.index < 0) {
1647            tcg_out_st16_8(s, h.cond, datalo, h.base, 0);
1648        } else {
1649            tcg_out_st16_r(s, h.cond, datalo, h.base, h.index);
1650        }
1651        break;
1652    case MO_32:
1653        if (h.index < 0) {
1654            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
1655        } else {
1656            tcg_out_st32_r(s, h.cond, datalo, h.base, h.index);
1657        }
1658        break;
1659    case MO_64:
1660        /* We used pair allocation for datalo, so already should be aligned. */
1661        tcg_debug_assert((datalo & 1) == 0);
1662        tcg_debug_assert(datahi == datalo + 1);
1663        /* STRD requires alignment; double-check that. */
1664        if (get_alignment_bits(opc) >= MO_64) {
1665            if (h.index < 0) {
1666                tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
1667            } else {
1668                tcg_out_strd_r(s, h.cond, datalo, h.base, h.index);
1669            }
1670        } else if (h.index_scratch) {
1671            tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base);
1672            tcg_out_st32_12(s, h.cond, datahi, h.index, 4);
1673        } else {
1674            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
1675                            h.base, h.index, SHIFT_IMM_LSL(0));
1676            tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0);
1677            tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4);
1678        }
1679        break;
1680    default:
1681        g_assert_not_reached();
1682    }
1683}
1684
1685static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
1686                            TCGReg addrlo, TCGReg addrhi,
1687                            MemOpIdx oi, TCGType data_type)
1688{
1689    MemOp opc = get_memop(oi);
1690    TCGLabelQemuLdst *ldst;
1691    HostAddress h;
1692
1693    ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
1694    if (ldst) {
1695        ldst->type = data_type;
1696        ldst->datalo_reg = datalo;
1697        ldst->datahi_reg = datahi;
1698
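        /*
         * Unlike a load, the store must not happen on a TLB miss, so
         * predicate it on the comparison made by prepare_host_addr;
         * the COND_NE call below handles the miss.
         */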
1699        h.cond = COND_EQ;
1700        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
1701
1702        /* The conditional call is last, as we're going to return here. */
1703        ldst->label_ptr[0] = s->code_ptr;
1704        tcg_out_bl_imm(s, COND_NE, 0);
1705        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1706    } else {
1707        tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
1708    }
1709}
1710
1711static void tcg_out_epilogue(TCGContext *s);
1712
1713static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
1714{
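    /* Place the value returned to tcg_qemu_tb_exec in r0, then unwind. */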
1715    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
1716    tcg_out_epilogue(s);
1717}
1718
1719static void tcg_out_goto_tb(TCGContext *s, int which)
1720{
1721    uintptr_t i_addr;
1722    intptr_t i_disp;
1723
1724    /* Direct branch will be patched by tb_target_set_jmp_target. */
1725    set_jmp_insn_offset(s, which);
1726    tcg_out32(s, INSN_NOP);
1727
1728    /* When branch is out of range, fall through to indirect. */
1729    i_addr = get_jmp_target_addr(s, which);
1730    i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
1731    tcg_debug_assert(i_disp < 0);
1732    if (i_disp >= -0xfff) {
1733        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
1734    } else {
1735        /*
1736         * The TB is close, but outside the 12 bits addressable by
1737         * the load.  We can extend this to 20 bits with a sub of a
1738         * shifted immediate from pc.
1739         */
1740        int h = -i_disp;
1741        int l = h & 0xfff;
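        /*
         * E.g. for i_disp = -0x12345: l = 0x345, and h - l = 0x12000
         * encodes as a rotated 8-bit immediate, so we emit
         * "sub r0, pc, #0x12000" then "ldr pc, [r0, #-0x345]".
         */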
1742
1743        h = encode_imm_nofail(h - l);
1744        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
1745        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, -l);
1746    }
1747    set_jmp_reset_offset(s, which);
1748}
1749
1750void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1751                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1752{
1753    uintptr_t addr = tb->jmp_target_addr[n];
1754    ptrdiff_t offset = addr - (jmp_rx + 8);
1755    tcg_insn_unit insn;
1756
1757    /* Either directly branch, or fall through to indirect branch. */
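    /* The B immediate is 24 bits of words: a signed 26-bit byte offset. */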
1758    if (offset == sextract64(offset, 0, 26)) {
1759        /* B <addr> */
1760        insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
1761    } else {
1762        insn = INSN_NOP;
1763    }
1764
1765    qatomic_set((uint32_t *)jmp_rw, insn);
1766    flush_idcache_range(jmp_rx, jmp_rw, 4);
1767}
1768
1769static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1770                       const TCGArg args[TCG_MAX_OP_ARGS],
1771                       const int const_args[TCG_MAX_OP_ARGS])
1772{
1773    TCGArg a0, a1, a2, a3, a4, a5;
1774    int c;
1775
1776    switch (opc) {
1777    case INDEX_op_goto_ptr:
1778        tcg_out_b_reg(s, COND_AL, args[0]);
1779        break;
1780    case INDEX_op_br:
1781        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
1782        break;
1783
1784    case INDEX_op_ld8u_i32:
1785        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1786        break;
1787    case INDEX_op_ld8s_i32:
1788        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1789        break;
1790    case INDEX_op_ld16u_i32:
1791        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1792        break;
1793    case INDEX_op_ld16s_i32:
1794        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1795        break;
1796    case INDEX_op_ld_i32:
1797        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1798        break;
1799    case INDEX_op_st8_i32:
1800        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
1801        break;
1802    case INDEX_op_st16_i32:
1803        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
1804        break;
1805    case INDEX_op_st_i32:
1806        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1807        break;
1808
1809    case INDEX_op_movcond_i32:
1810        /* Constraints mean that v2 is always in the same register as dest,
1811         * so we only need to do "if condition passed, move v1 to dest".
1812         */
1813        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1814                        args[1], args[2], const_args[2]);
1815        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
1816                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
1817        break;
1818    case INDEX_op_add_i32:
1819        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
1820                        args[0], args[1], args[2], const_args[2]);
1821        break;
1822    case INDEX_op_sub_i32:
1823        if (const_args[1]) {
1824            if (const_args[2]) {
1825                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
1826            } else {
1827                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
1828                               args[0], args[2], args[1], 1);
1829            }
1830        } else {
1831            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
1832                            args[0], args[1], args[2], const_args[2]);
1833        }
1834        break;
1835    case INDEX_op_and_i32:
1836        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
1837                        args[0], args[1], args[2], const_args[2]);
1838        break;
1839    case INDEX_op_andc_i32:
1840        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
1841                        args[0], args[1], args[2], const_args[2]);
1842        break;
1843    case INDEX_op_or_i32:
1844        c = ARITH_ORR;
1845        goto gen_arith;
1846    case INDEX_op_xor_i32:
1847        c = ARITH_EOR;
1848        /* Fall through.  */
1849    gen_arith:
1850        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
1851        break;
1852    case INDEX_op_add2_i32:
1853        a0 = args[0], a1 = args[1], a2 = args[2];
1854        a3 = args[3], a4 = args[4], a5 = args[5];
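        /*
         * If the low result would overwrite a high input, compute it
         * into TMP and copy it into place after the high half.
         */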
1855        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
1856            a0 = TCG_REG_TMP;
1857        }
1858        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
1859                        a0, a2, a4, const_args[4]);
1860        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
1861                        a1, a3, a5, const_args[5]);
1862        tcg_out_mov_reg(s, COND_AL, args[0], a0);
1863        break;
1864    case INDEX_op_sub2_i32:
1865        a0 = args[0], a1 = args[1], a2 = args[2];
1866        a3 = args[3], a4 = args[4], a5 = args[5];
1867        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
1868            a0 = TCG_REG_TMP;
1869        }
1870        if (const_args[2]) {
1871            if (const_args[4]) {
1872                tcg_out_movi32(s, COND_AL, a0, a4);
1873                a4 = a0;
1874            }
1875            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
1876        } else {
1877            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
1878                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
1879        }
1880        if (const_args[3]) {
1881            if (const_args[5]) {
1882                tcg_out_movi32(s, COND_AL, a1, a5);
1883                a5 = a1;
1884            }
1885            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
1886        } else {
1887            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
1888                            a1, a3, a5, const_args[5]);
1889        }
1890        tcg_out_mov_reg(s, COND_AL, args[0], a0);
1891        break;
1892    case INDEX_op_neg_i32:
1893        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
1894        break;
1895    case INDEX_op_not_i32:
1896        tcg_out_dat_reg(s, COND_AL,
1897                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
1898        break;
1899    case INDEX_op_mul_i32:
1900        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
1901        break;
1902    case INDEX_op_mulu2_i32:
1903        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1904        break;
1905    case INDEX_op_muls2_i32:
1906        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1907        break;
1908    /* XXX: Perhaps args[2] & 0x1f is wrong */
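    /*
     * Note that in the immediate encodings a count of 0 means
     * shift-by-32 for LSR/ASR (and RRX for ROR), so a zero count
     * is remapped to LSL #0, a plain move.
     */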
1909    case INDEX_op_shl_i32:
1910        c = const_args[2] ?
1911                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
1912        goto gen_shift32;
1913    case INDEX_op_shr_i32:
1914        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
1915                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
1916        goto gen_shift32;
1917    case INDEX_op_sar_i32:
1918        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
1919                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
1920        goto gen_shift32;
1921    case INDEX_op_rotr_i32:
1922        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
1923                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
1924        /* Fall through.  */
1925    gen_shift32:
1926        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
1927        break;
1928
1929    case INDEX_op_rotl_i32:
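        /* ARM has no ROL; rotate left by n via rotate right by 32 - n. */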
1930        if (const_args[2]) {
1931            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1932                            ((0x20 - args[2]) & 0x1f) ?
1933                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
1934                            SHIFT_IMM_LSL(0));
1935        } else {
1936            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
1937            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1938                            SHIFT_REG_ROR(TCG_REG_TMP));
1939        }
1940        break;
1941
1942    case INDEX_op_ctz_i32:
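        /* ctz(x) == clz(rbit(x)): bit-reverse and share the clz path. */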
1943        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
1944        a1 = TCG_REG_TMP;
1945        goto do_clz;
1946
1947    case INDEX_op_clz_i32:
1948        a1 = args[1];
1949    do_clz:
1950        a0 = args[0];
1951        a2 = args[2];
1952        c = const_args[2];
1953        if (c && a2 == 32) {
1954            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
1955            break;
1956        }
1957        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
1958        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
1959        if (c || a0 != a2) {
1960            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
1961        }
1962        break;
1963
1964    case INDEX_op_brcond_i32:
1965        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1966                       args[0], args[1], const_args[1]);
1967        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
1968                           arg_label(args[3]));
1969        break;
1970    case INDEX_op_setcond_i32:
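        /* Both moves below are predicated, so no branch is required. */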
1971        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1972                        args[1], args[2], const_args[2]);
1973        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
1974                        ARITH_MOV, args[0], 0, 1);
1975        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
1976                        ARITH_MOV, args[0], 0, 0);
1977        break;
1978
1979    case INDEX_op_brcond2_i32:
1980        c = tcg_out_cmp2(s, args, const_args);
1981        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
1982        break;
1983    case INDEX_op_setcond2_i32:
1984        c = tcg_out_cmp2(s, args + 1, const_args + 1);
1985        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
1986        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
1987                        ARITH_MOV, args[0], 0, 0);
1988        break;
1989
1990    case INDEX_op_qemu_ld_a32_i32:
1991        tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
1992        break;
1993    case INDEX_op_qemu_ld_a64_i32:
1994        tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
1995                        args[3], TCG_TYPE_I32);
1996        break;
1997    case INDEX_op_qemu_ld_a32_i64:
1998        tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
1999                        args[3], TCG_TYPE_I64);
2000        break;
2001    case INDEX_op_qemu_ld_a64_i64:
2002        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
2003                        args[4], TCG_TYPE_I64);
2004        break;
2005
2006    case INDEX_op_qemu_st_a32_i32:
2007        tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
2008        break;
2009    case INDEX_op_qemu_st_a64_i32:
2010        tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
2011                        args[3], TCG_TYPE_I32);
2012        break;
2013    case INDEX_op_qemu_st_a32_i64:
2014        tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
2015                        args[3], TCG_TYPE_I64);
2016        break;
2017    case INDEX_op_qemu_st_a64_i64:
2018        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
2019                        args[4], TCG_TYPE_I64);
2020        break;
2021
2022    case INDEX_op_bswap16_i32:
2023        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
2024        break;
2025    case INDEX_op_bswap32_i32:
2026        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
2027        break;
2028
2029    case INDEX_op_deposit_i32:
2030        tcg_out_deposit(s, COND_AL, args[0], args[2],
2031                        args[3], args[4], const_args[2]);
2032        break;
2033    case INDEX_op_extract_i32:
2034        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
2035        break;
2036    case INDEX_op_sextract_i32:
2037        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
2038        break;
2039    case INDEX_op_extract2_i32:
2040        /* ??? These optimizations vs zero should be generic.  */
2041        /* ??? But we can't substitute 2 for 1 in the opcode stream yet.  */
2042        if (const_args[1]) {
2043            if (const_args[2]) {
2044                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
2045            } else {
2046                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2047                                args[2], SHIFT_IMM_LSL(32 - args[3]));
2048            }
2049        } else if (const_args[2]) {
2050            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2051                            args[1], SHIFT_IMM_LSR(args[3]));
2052        } else {
2053            /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
2054            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
2055                            args[2], SHIFT_IMM_LSL(32 - args[3]));
2056            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
2057                            args[1], SHIFT_IMM_LSR(args[3]));
2058        }
2059        break;
2060
2061    case INDEX_op_div_i32:
2062        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
2063        break;
2064    case INDEX_op_divu_i32:
2065        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
2066        break;
2067
2068    case INDEX_op_mb:
2069        tcg_out_mb(s, args[0]);
2070        break;
2071
2072    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
2073    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2074    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2075    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2076    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
2077    case INDEX_op_ext8u_i32:
2078    case INDEX_op_ext16s_i32:
2079    case INDEX_op_ext16u_i32:
2080    default:
2081        g_assert_not_reached();
2082    }
2083}
2084
2085static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2086{
2087    switch (op) {
2088    case INDEX_op_goto_ptr:
2089        return C_O0_I1(r);
2090
2091    case INDEX_op_ld8u_i32:
2092    case INDEX_op_ld8s_i32:
2093    case INDEX_op_ld16u_i32:
2094    case INDEX_op_ld16s_i32:
2095    case INDEX_op_ld_i32:
2096    case INDEX_op_neg_i32:
2097    case INDEX_op_not_i32:
2098    case INDEX_op_bswap16_i32:
2099    case INDEX_op_bswap32_i32:
2100    case INDEX_op_ext8s_i32:
2101    case INDEX_op_ext16s_i32:
2102    case INDEX_op_ext16u_i32:
2103    case INDEX_op_extract_i32:
2104    case INDEX_op_sextract_i32:
2105        return C_O1_I1(r, r);
2106
2107    case INDEX_op_st8_i32:
2108    case INDEX_op_st16_i32:
2109    case INDEX_op_st_i32:
2110        return C_O0_I2(r, r);
2111
2112    case INDEX_op_add_i32:
2113    case INDEX_op_sub_i32:
2114    case INDEX_op_setcond_i32:
2115        return C_O1_I2(r, r, rIN);
2116
2117    case INDEX_op_and_i32:
2118    case INDEX_op_andc_i32:
2119    case INDEX_op_clz_i32:
2120    case INDEX_op_ctz_i32:
2121        return C_O1_I2(r, r, rIK);
2122
2123    case INDEX_op_mul_i32:
2124    case INDEX_op_div_i32:
2125    case INDEX_op_divu_i32:
2126        return C_O1_I2(r, r, r);
2127
2128    case INDEX_op_mulu2_i32:
2129    case INDEX_op_muls2_i32:
2130        return C_O2_I2(r, r, r, r);
2131
2132    case INDEX_op_or_i32:
2133    case INDEX_op_xor_i32:
2134        return C_O1_I2(r, r, rI);
2135
2136    case INDEX_op_shl_i32:
2137    case INDEX_op_shr_i32:
2138    case INDEX_op_sar_i32:
2139    case INDEX_op_rotl_i32:
2140    case INDEX_op_rotr_i32:
2141        return C_O1_I2(r, r, ri);
2142
2143    case INDEX_op_brcond_i32:
2144        return C_O0_I2(r, rIN);
2145    case INDEX_op_deposit_i32:
2146        return C_O1_I2(r, 0, rZ);
2147    case INDEX_op_extract2_i32:
2148        return C_O1_I2(r, rZ, rZ);
2149    case INDEX_op_movcond_i32:
2150        return C_O1_I4(r, r, rIN, rIK, 0);
2151    case INDEX_op_add2_i32:
2152        return C_O2_I4(r, r, r, r, rIN, rIK);
2153    case INDEX_op_sub2_i32:
2154        return C_O2_I4(r, r, rI, rI, rIN, rIK);
2155    case INDEX_op_brcond2_i32:
2156        return C_O0_I4(r, r, rI, rI);
2157    case INDEX_op_setcond2_i32:
2158        return C_O1_I4(r, r, r, rI, rI);
2159
2160    case INDEX_op_qemu_ld_a32_i32:
2161        return C_O1_I1(r, q);
2162    case INDEX_op_qemu_ld_a64_i32:
2163        return C_O1_I2(r, q, q);
2164    case INDEX_op_qemu_ld_a32_i64:
2165        return C_O2_I1(e, p, q);
2166    case INDEX_op_qemu_ld_a64_i64:
2167        return C_O2_I2(e, p, q, q);
2168    case INDEX_op_qemu_st_a32_i32:
2169        return C_O0_I2(q, q);
2170    case INDEX_op_qemu_st_a64_i32:
2171        return C_O0_I3(q, q, q);
2172    case INDEX_op_qemu_st_a32_i64:
2173        return C_O0_I3(Q, p, q);
2174    case INDEX_op_qemu_st_a64_i64:
2175        return C_O0_I4(Q, p, q, q);
2176
2177    case INDEX_op_st_vec:
2178        return C_O0_I2(w, r);
2179    case INDEX_op_ld_vec:
2180    case INDEX_op_dupm_vec:
2181        return C_O1_I1(w, r);
2182    case INDEX_op_dup_vec:
2183        return C_O1_I1(w, wr);
2184    case INDEX_op_abs_vec:
2185    case INDEX_op_neg_vec:
2186    case INDEX_op_not_vec:
2187    case INDEX_op_shli_vec:
2188    case INDEX_op_shri_vec:
2189    case INDEX_op_sari_vec:
2190        return C_O1_I1(w, w);
2191    case INDEX_op_dup2_vec:
2192    case INDEX_op_add_vec:
2193    case INDEX_op_mul_vec:
2194    case INDEX_op_smax_vec:
2195    case INDEX_op_smin_vec:
2196    case INDEX_op_ssadd_vec:
2197    case INDEX_op_sssub_vec:
2198    case INDEX_op_sub_vec:
2199    case INDEX_op_umax_vec:
2200    case INDEX_op_umin_vec:
2201    case INDEX_op_usadd_vec:
2202    case INDEX_op_ussub_vec:
2203    case INDEX_op_xor_vec:
2204    case INDEX_op_arm_sshl_vec:
2205    case INDEX_op_arm_ushl_vec:
2206        return C_O1_I2(w, w, w);
2207    case INDEX_op_arm_sli_vec:
2208        return C_O1_I2(w, 0, w);
2209    case INDEX_op_or_vec:
2210    case INDEX_op_andc_vec:
2211        return C_O1_I2(w, w, wO);
2212    case INDEX_op_and_vec:
2213    case INDEX_op_orc_vec:
2214        return C_O1_I2(w, w, wV);
2215    case INDEX_op_cmp_vec:
2216        return C_O1_I2(w, w, wZ);
2217    case INDEX_op_bitsel_vec:
2218        return C_O1_I3(w, w, w, w);
2219    default:
2220        g_assert_not_reached();
2221    }
2222}
2223
2224static void tcg_target_init(TCGContext *s)
2225{
2226    /*
2227     * Only probe for the platform and capabilities if we haven't already
2228     * determined maximum values at compile time.
2229     */
2230#if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
2231    {
2232        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2233#ifndef use_idiv_instructions
2234        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
2235#endif
2236#ifndef use_neon_instructions
2237        use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
2238#endif
2239    }
2240#endif
2241
2242    if (__ARM_ARCH < 7) {
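        /* AT_PLATFORM is a string such as "v5l" or "v7l". */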
2243        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
2244        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
2245            arm_arch = pl[1] - '0';
2246        }
2247
2248        if (arm_arch < 6) {
2249            error_report("TCG: ARMv%d is unsupported; exiting", arm_arch);
2250            exit(EXIT_FAILURE);
2251        }
2252    }
2253
2254    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2255
2256    tcg_target_call_clobber_regs = 0;
2257    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2258    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2259    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2260    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2261    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
2262    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2263
2264    if (use_neon_instructions) {
2265        tcg_target_available_regs[TCG_TYPE_V64]  = ALL_VECTOR_REGS;
2266        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2267
2268        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
2269        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
2270        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
2271        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
2272        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
2273        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
2274        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
2275        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
2276        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
2277        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
2278        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
2279        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
2280    }
2281
2282    s->reserved_regs = 0;
2283    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2284    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
2285    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
2286    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
2287}
2288
2289static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
2290                       TCGReg arg1, intptr_t arg2)
2291{
2292    switch (type) {
2293    case TCG_TYPE_I32:
2294        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
2295        return;
2296    case TCG_TYPE_V64:
2297        /* regs 1; size 8; align 8 */
2298        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
2299        return;
2300    case TCG_TYPE_V128:
2301        /*
2302         * We have only 8-byte alignment for the stack per the ABI.
2303         * Rather than dynamically re-align the stack, it's easier
2304         * to simply not request alignment beyond that.  So:
2305         * regs 2; size 8; align 8
2306         */
2307        tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2);
2308        return;
2309    default:
2310        g_assert_not_reached();
2311    }
2312}
2313
2314static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
2315                       TCGReg arg1, intptr_t arg2)
2316{
2317    switch (type) {
2318    case TCG_TYPE_I32:
2319        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
2320        return;
2321    case TCG_TYPE_V64:
2322        /* regs 1; size 8; align 8 */
2323        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
2324        return;
2325    case TCG_TYPE_V128:
2326        /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */
2327        tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2);
2328        return;
2329    default:
2330        g_assert_not_reached();
2331    }
2332}
2333
2334static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
2335                        TCGReg base, intptr_t ofs)
2336{
2337    return false;
2338}
2339
2340static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
2341{
2342    if (ret == arg) {
2343        return true;
2344    }
2345    switch (type) {
2346    case TCG_TYPE_I32:
2347        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
2348            tcg_out_mov_reg(s, COND_AL, ret, arg);
2349            return true;
2350        }
2351        return false;
2352
2353    case TCG_TYPE_V64:
2354    case TCG_TYPE_V128:
2355        /* "VMOV D,N" is an alias for "VORR D,N,N". */
2356        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
2357        return true;
2358
2359    default:
2360        g_assert_not_reached();
2361    }
2362}
2363
2364static void tcg_out_movi(TCGContext *s, TCGType type,
2365                         TCGReg ret, tcg_target_long arg)
2366{
2367    tcg_debug_assert(type == TCG_TYPE_I32);
2368    tcg_debug_assert(ret < TCG_REG_Q0);
2369    tcg_out_movi32(s, COND_AL, ret, arg);
2370}
2371
2372static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
2373{
2374    return false;
2375}
2376
2377static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
2378                             tcg_target_long imm)
2379{
2380    int enc, opc = ARITH_ADD;
2381
2382    /* All of the easiest immediates to encode are positive. */
2383    if (imm < 0) {
2384        imm = -imm;
2385        opc = ARITH_SUB;
2386    }
2387    enc = encode_imm(imm);
2388    if (enc >= 0) {
2389        tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc);
2390    } else {
2391        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm);
2392        tcg_out_dat_reg(s, COND_AL, opc, rd, rs,
2393                        TCG_REG_TMP, SHIFT_IMM_LSL(0));
2394    }
2395}
2396
2397/* Type is always V128, with I64 elements.  */
2398static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
2399{
2400    /* Move high element into place first. */
2401    /* VMOV Dd+1, Ds */
2402    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
2403    /* Move low element into place; tcg_out_mov will check for nop. */
2404    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
2405}
2406
2407static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2408                            TCGReg rd, TCGReg rs)
2409{
2410    int q = type - TCG_TYPE_V64;
2411
2412    if (vece == MO_64) {
2413        if (type == TCG_TYPE_V128) {
2414            tcg_out_dup2_vec(s, rd, rs, rs);
2415        } else {
2416            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
2417        }
2418    } else if (rs < TCG_REG_Q0) {
2419        int b = (vece == MO_8);
2420        int e = (vece == MO_16);
2421        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
2422                  encode_vn(rd) | (rs << 12));
2423    } else {
2424        int imm4 = 1 << vece;
2425        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
2426                  encode_vd(rd) | encode_vm(rs));
2427    }
2428    return true;
2429}
2430
2431static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2432                             TCGReg rd, TCGReg base, intptr_t offset)
2433{
2434    if (vece == MO_64) {
2435        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
2436        if (type == TCG_TYPE_V128) {
2437            tcg_out_dup2_vec(s, rd, rd, rd);
2438        }
2439    } else {
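        /* VLD1R: load one element and replicate it to all lanes. */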
2440        int q = type - TCG_TYPE_V64;
2441        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
2442                      rd, base, offset);
2443    }
2444    return true;
2445}
2446
2447static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2448                             TCGReg rd, int64_t v64)
2449{
2450    int q = type - TCG_TYPE_V64;
2451    int cmode, imm8, i;
2452
2453    /* Test all bytes equal first.  */
2454    if (vece == MO_8) {
2455        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
2456        return;
2457    }
2458
2459    /*
2460     * Test all bytes 0x00 or 0xff second.  This can match cases that
2461     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
2462     */
2463    for (i = imm8 = 0; i < 8; i++) {
2464        uint8_t byte = v64 >> (i * 8);
2465        if (byte == 0xff) {
2466            imm8 |= 1 << i;
2467        } else if (byte != 0) {
2468            goto fail_bytes;
2469        }
2470    }
2471    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
2472    return;
2473 fail_bytes:
2474
2475    /*
2476     * Tests for various replications.  For each element width, if we
2477     * cannot find an expansion there's no point checking a larger
2478     * width because we already know by replication it cannot match.
2479     */
2480    if (vece == MO_16) {
2481        uint16_t v16 = v64;
2482
2483        if (is_shimm16(v16, &cmode, &imm8)) {
2484            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2485            return;
2486        }
2487        if (is_shimm16(~v16, &cmode, &imm8)) {
2488            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2489            return;
2490        }
2491
2492        /*
2493         * Otherwise, all remaining constants can be loaded in two insns:
2494         * rd = v16 & 0xff, rd |= v16 & 0xff00.
2495         */
2496        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
2497        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
2498        return;
2499    }
2500
2501    if (vece == MO_32) {
2502        uint32_t v32 = v64;
2503
2504        if (is_shimm32(v32, &cmode, &imm8) ||
2505            is_soimm32(v32, &cmode, &imm8)) {
2506            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2507            return;
2508        }
2509        if (is_shimm32(~v32, &cmode, &imm8) ||
2510            is_soimm32(~v32, &cmode, &imm8)) {
2511            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2512            return;
2513        }
2514
2515        /*
2516         * Restrict the set of constants to those we can load with
2517         * two instructions.  Others we load from the pool.
2518         */
2519        i = is_shimm32_pair(v32, &cmode, &imm8);
2520        if (i) {
2521            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2522            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
2523            return;
2524        }
2525        i = is_shimm32_pair(~v32, &cmode, &imm8);
2526        if (i) {
2527            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2528            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
2529            return;
2530        }
2531    }
2532
2533    /*
2534     * As a last resort, load from the constant pool.
2535     */
2536    if (!q || vece == MO_64) {
2537        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
2538        /* VLDR Dd, [pc + offset] */
2539        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
2540        if (q) {
2541            tcg_out_dup2_vec(s, rd, rd, rd);
2542        }
2543    } else {
2544        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
2545        /* add tmp, pc, offset */
2546        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
2547        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
2548    }
2549}
2550
2551static const ARMInsn vec_cmp_insn[16] = {
2552    [TCG_COND_EQ] = INSN_VCEQ,
2553    [TCG_COND_GT] = INSN_VCGT,
2554    [TCG_COND_GE] = INSN_VCGE,
2555    [TCG_COND_GTU] = INSN_VCGT_U,
2556    [TCG_COND_GEU] = INSN_VCGE_U,
2557};
2558
2559static const ARMInsn vec_cmp0_insn[16] = {
2560    [TCG_COND_EQ] = INSN_VCEQ0,
2561    [TCG_COND_GT] = INSN_VCGT0,
2562    [TCG_COND_GE] = INSN_VCGE0,
2563    [TCG_COND_LT] = INSN_VCLT0,
2564    [TCG_COND_LE] = INSN_VCLE0,
2565};
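/*
 * Conditions absent from these tables are handled in tcg_out_vec_op,
 * by swapping the operands or by materializing a zero vector operand.
 */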
2566
2567static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2568                           unsigned vecl, unsigned vece,
2569                           const TCGArg args[TCG_MAX_OP_ARGS],
2570                           const int const_args[TCG_MAX_OP_ARGS])
2571{
2572    TCGType type = vecl + TCG_TYPE_V64;
2573    unsigned q = vecl;
2574    TCGArg a0, a1, a2, a3;
2575    int cmode, imm8;
2576
2577    a0 = args[0];
2578    a1 = args[1];
2579    a2 = args[2];
2580
2581    switch (opc) {
2582    case INDEX_op_ld_vec:
2583        tcg_out_ld(s, type, a0, a1, a2);
2584        return;
2585    case INDEX_op_st_vec:
2586        tcg_out_st(s, type, a0, a1, a2);
2587        return;
2588    case INDEX_op_dupm_vec:
2589        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2590        return;
2591    case INDEX_op_dup2_vec:
2592        tcg_out_dup2_vec(s, a0, a1, a2);
2593        return;
2594    case INDEX_op_abs_vec:
2595        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
2596        return;
2597    case INDEX_op_neg_vec:
2598        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
2599        return;
2600    case INDEX_op_not_vec:
2601        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
2602        return;
2603    case INDEX_op_add_vec:
2604        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
2605        return;
2606    case INDEX_op_mul_vec:
2607        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
2608        return;
2609    case INDEX_op_smax_vec:
2610        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
2611        return;
2612    case INDEX_op_smin_vec:
2613        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
2614        return;
2615    case INDEX_op_sub_vec:
2616        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
2617        return;
2618    case INDEX_op_ssadd_vec:
2619        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
2620        return;
2621    case INDEX_op_sssub_vec:
2622        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
2623        return;
2624    case INDEX_op_umax_vec:
2625        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
2626        return;
2627    case INDEX_op_umin_vec:
2628        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
2629        return;
2630    case INDEX_op_usadd_vec:
2631        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
2632        return;
2633    case INDEX_op_ussub_vec:
2634        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
2635        return;
2636    case INDEX_op_xor_vec:
2637        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
2638        return;
2639    case INDEX_op_arm_sshl_vec:
2640        /*
2641         * Note that Vm is the data and Vn is the shift count,
2642         * therefore the arguments appear reversed.
2643         */
2644        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
2645        return;
2646    case INDEX_op_arm_ushl_vec:
2647        /* See above. */
2648        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
2649        return;
2650    case INDEX_op_shli_vec:
2651        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
2652        return;
2653    case INDEX_op_shri_vec:
2654        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
2655        return;
2656    case INDEX_op_sari_vec:
2657        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
2658        return;
2659    case INDEX_op_arm_sli_vec:
2660        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
2661        return;
2662
2663    case INDEX_op_andc_vec:
2664        if (!const_args[2]) {
2665            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
2666            return;
2667        }
2668        a2 = ~a2;
2669        /* fall through */
2670    case INDEX_op_and_vec:
2671        if (const_args[2]) {
2672            is_shimm1632(~a2, &cmode, &imm8);
2673            if (a0 == a1) {
2674                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
2675                return;
2676            }
2677            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
2678            a2 = a0;
2679        }
2680        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
2681        return;
2682
2683    case INDEX_op_orc_vec:
2684        if (!const_args[2]) {
2685            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
2686            return;
2687        }
2688        a2 = ~a2;
2689        /* fall through */
2690    case INDEX_op_or_vec:
2691        if (const_args[2]) {
2692            is_shimm1632(a2, &cmode, &imm8);
2693            if (a0 == a1) {
2694                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
2695                return;
2696            }
2697            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
2698            a2 = a0;
2699        }
2700        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
2701        return;
2702
2703    case INDEX_op_cmp_vec:
2704        {
2705            TCGCond cond = args[3];
2706
2707            if (cond == TCG_COND_NE) {
2708                if (const_args[2]) {
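                    /* x != 0 is VTST x,x: a lane with any bit set becomes -1. */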
2709                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
2710                } else {
2711                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
2712                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
2713                }
2714            } else {
2715                ARMInsn insn;
2716
2717                if (const_args[2]) {
2718                    insn = vec_cmp0_insn[cond];
2719                    if (insn) {
2720                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
2721                        return;
2722                    }
2723                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
2724                    a2 = TCG_VEC_TMP;
2725                }
2726                insn = vec_cmp_insn[cond];
2727                if (insn == 0) {
2728                    TCGArg t;
2729                    t = a1, a1 = a2, a2 = t;
2730                    cond = tcg_swap_cond(cond);
2731                    insn = vec_cmp_insn[cond];
2732                    tcg_debug_assert(insn != 0);
2733                }
2734                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
2735            }
2736        }
2737        return;
2738
2739    case INDEX_op_bitsel_vec:
2740        a3 = args[3];
2741        if (a0 == a3) {
2742            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
2743        } else if (a0 == a2) {
2744            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
2745        } else {
2746            tcg_out_mov(s, type, a0, a1);
2747            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
2748        }
2749        return;
2750
2751    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
2752    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
2753    default:
2754        g_assert_not_reached();
2755    }
2756}
2757
2758int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2759{
2760    switch (opc) {
2761    case INDEX_op_add_vec:
2762    case INDEX_op_sub_vec:
2763    case INDEX_op_and_vec:
2764    case INDEX_op_andc_vec:
2765    case INDEX_op_or_vec:
2766    case INDEX_op_orc_vec:
2767    case INDEX_op_xor_vec:
2768    case INDEX_op_not_vec:
2769    case INDEX_op_shli_vec:
2770    case INDEX_op_shri_vec:
2771    case INDEX_op_sari_vec:
2772    case INDEX_op_ssadd_vec:
2773    case INDEX_op_sssub_vec:
2774    case INDEX_op_usadd_vec:
2775    case INDEX_op_ussub_vec:
2776    case INDEX_op_bitsel_vec:
2777        return 1;
2778    case INDEX_op_abs_vec:
2779    case INDEX_op_cmp_vec:
2780    case INDEX_op_mul_vec:
2781    case INDEX_op_neg_vec:
2782    case INDEX_op_smax_vec:
2783    case INDEX_op_smin_vec:
2784    case INDEX_op_umax_vec:
2785    case INDEX_op_umin_vec:
2786        return vece < MO_64;
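    /* Returning -1 requests expansion via tcg_expand_vec_op below. */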
2787    case INDEX_op_shlv_vec:
2788    case INDEX_op_shrv_vec:
2789    case INDEX_op_sarv_vec:
2790    case INDEX_op_rotli_vec:
2791    case INDEX_op_rotlv_vec:
2792    case INDEX_op_rotrv_vec:
2793        return -1;
2794    default:
2795        return 0;
2796    }
2797}
2798
2799void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2800                       TCGArg a0, ...)
2801{
2802    va_list va;
2803    TCGv_vec v0, v1, v2, t1, t2, c1;
2804    TCGArg a2;
2805
2806    va_start(va, a0);
2807    v0 = temp_tcgv_vec(arg_temp(a0));
2808    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
2809    a2 = va_arg(va, TCGArg);
2810    va_end(va);
2811
2812    switch (opc) {
2813    case INDEX_op_shlv_vec:
2814        /*
2815         * Merely propagate shlv_vec to arm_ushl_vec.
2816         * In this way we don't set TCG_TARGET_HAS_shv_vec
2817         * because everything is done via expansion.
2818         */
2819        v2 = temp_tcgv_vec(arg_temp(a2));
2820        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
2821                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
2822        break;
2823
2824    case INDEX_op_shrv_vec:
2825    case INDEX_op_sarv_vec:
2826        /* Right shifts are negative left shifts for NEON.  */
2827        v2 = temp_tcgv_vec(arg_temp(a2));
2828        t1 = tcg_temp_new_vec(type);
2829        tcg_gen_neg_vec(vece, t1, v2);
2830        if (opc == INDEX_op_shrv_vec) {
2831            opc = INDEX_op_arm_ushl_vec;
2832        } else {
2833            opc = INDEX_op_arm_sshl_vec;
2834        }
2835        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
2836                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2837        tcg_temp_free_vec(t1);
2838        break;
2839
2840    case INDEX_op_rotli_vec:
2841        t1 = tcg_temp_new_vec(type);
2842        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
2843        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
2844                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
2845        tcg_temp_free_vec(t1);
2846        break;
2847
2848    case INDEX_op_rotlv_vec:
2849        v2 = temp_tcgv_vec(arg_temp(a2));
2850        t1 = tcg_temp_new_vec(type);
2851        c1 = tcg_constant_vec(type, vece, 8 << vece);
2852        tcg_gen_sub_vec(vece, t1, v2, c1);
2853        /* Right shifts are negative left shifts for NEON.  */
2854        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
2855                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2856        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
2857                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
2858        tcg_gen_or_vec(vece, v0, v0, t1);
2859        tcg_temp_free_vec(t1);
2860        break;
2861
2862    case INDEX_op_rotrv_vec:
2863        v2 = temp_tcgv_vec(arg_temp(a2));
2864        t1 = tcg_temp_new_vec(type);
2865        t2 = tcg_temp_new_vec(type);
2866        c1 = tcg_constant_vec(type, vece, 8 << vece);
2867        tcg_gen_neg_vec(vece, t1, v2);
2868        tcg_gen_sub_vec(vece, t2, c1, v2);
2869        /* Right shifts are negative left shifts for NEON.  */
2870        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
2871                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
2872        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
2873                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
2874        tcg_gen_or_vec(vece, v0, t1, t2);
2875        tcg_temp_free_vec(t1);
2876        tcg_temp_free_vec(t2);
2877        break;
2878
2879    default:
2880        g_assert_not_reached();
2881    }
2882}
2883
2884static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2885{
2886    int i;
2887    for (i = 0; i < count; ++i) {
2888        p[i] = INSN_NOP;
2889    }
2890}
2891
2892/* Compute frame size via macros, to share between tcg_target_qemu_prologue
2893   and tcg_register_jit.  */
2894
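/* r4-r11 (11 - 4 + 1 = 8 registers) plus lr: 9 words pushed by stmdb. */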
2895#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
2896
2897#define FRAME_SIZE \
2898    ((PUSH_SIZE \
2899      + TCG_STATIC_CALL_ARGS_SIZE \
2900      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
2901      + TCG_TARGET_STACK_ALIGN - 1) \
2902     & -TCG_TARGET_STACK_ALIGN)
2903
2904#define STACK_ADDEND  (FRAME_SIZE - PUSH_SIZE)
2905
2906static void tcg_target_qemu_prologue(TCGContext *s)
2907{
2908    /* Calling convention requires us to save r4-r11 and lr.  */
2909    /* stmdb sp!, { r4 - r11, lr } */
2910    tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
2911                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
2912                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
2913                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));
2914
2915    /* Reserve callee argument and tcg temp space.  */
2916    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
2917                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
2918    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
2919                  CPU_TEMP_BUF_NLONGS * sizeof(long));
2920
2921    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2922
2923#ifndef CONFIG_SOFTMMU
2924    if (guest_base) {
2925        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
2926        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
2927    }
2928#endif
2929
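    /* Jump to the TB entry point passed as the second call argument. */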
2930    tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
2931
2932    /*
2933     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
2934     * and fall through to the rest of the epilogue.
2935     */
2936    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2937    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
2938    tcg_out_epilogue(s);
2939}
2940
2941static void tcg_out_epilogue(TCGContext *s)
2942{
2943    /* Release local stack frame.  */
2944    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
2945                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
2946
2947    /* ldmia sp!, { r4 - r11, pc } */
2948    tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
2949                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
2950                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
2951                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
2952}
2953
2954typedef struct {
2955    DebugFrameHeader h;
2956    uint8_t fde_def_cfa[4];
2957    uint8_t fde_reg_ofs[18];
2958} DebugFrame;
2959
2960#define ELF_HOST_MACHINE EM_ARM
2961
2962/* We're expecting a 2 byte uleb128 encoded value.  */
2963QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
2964
2965static const DebugFrame debug_frame = {
2966    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2967    .h.cie.id = -1,
2968    .h.cie.version = 1,
2969    .h.cie.code_align = 1,
2970    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
2971    .h.cie.return_column = 14,
2972
2973    /* Total FDE size does not include the "len" member.  */
2974    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2975
2976    .fde_def_cfa = {
2977        12, 13,                         /* DW_CFA_def_cfa sp, ... */
2978        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
2979        (FRAME_SIZE >> 7)
2980    },
2981    .fde_reg_ofs = {
2982        /* The following must match the stmdb in the prologue.  */
2983        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
2984        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
2985        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
2986        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
2987        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
2988        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
2989        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
2990        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
2991        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
2992    }
2993};
2994
2995void tcg_register_jit(const void *buf, size_t buf_size)
2996{
2997    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2998}
2999