xref: /qemu/tcg/arm/tcg-target.c.inc (revision 29b62a10)
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%sp",  "%r14", "%pc",
    "%q0",  "%q1",  "%q2",  "%q3",  "%q4",  "%q5",  "%q6",  "%q7",
    "%q8",  "%q9",  "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,

    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped. */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15
#ifndef CONFIG_SOFTMMU
#define TCG_REG_GUEST_BASE  TCG_REG_R11
#endif

typedef enum {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,      /* Unsigned greater or equal */
    COND_CC = 0x3,      /* Unsigned less than */
    COND_MI = 0x4,      /* Negative */
    COND_PL = 0x5,      /* Zero or greater */
    COND_VS = 0x6,      /* Overflow */
    COND_VC = 0x7,      /* No overflow */
    COND_HI = 0x8,      /* Unsigned greater than */
    COND_LS = 0x9,      /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
} ARMCond;

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)   (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)   (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)   (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)   (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)   (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)   (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)   (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)   (((rs) << 8) | 0x70)
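
/*
 * Illustrative example: these macros build the shifter-operand field that
 * tcg_out_dat_reg() ORs into a data-processing insn.  SHIFT_IMM_LSR(16)
 * == (16 << 7) | 0x20 encodes "rm, lsr #16"; SHIFT_IMM_LSL(0) encodes
 * the plain, unshifted register operand.
 */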

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_B         = 0x0a000000,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDMIA     = 0x08b00000,
    INSN_STMDB     = 0x09200000,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,  /* dmb ish (v7) */
    INSN_DMB_MCR   = 0xee070fba,  /* mcr p15, 0, r0, c7, c10, 5 (v6 barrier) */

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Does anyone know whether it
       also just so happened to do nothing on pre-v6k, so that we don't
       need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}
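
/*
 * Worked example: the -8 accounts for the ARM pipeline, where PC reads
 * as the insn address + 8.  For a B at address 0x1000 targeting 0x2000,
 * the 24-bit word offset deposited is (0x2000 - 0x1000 - 8) >> 2 == 0x3fe.
 */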

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);

    if (imm12 >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, imm12);
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r2 will be overwritten when reading the tlb entry (softmmu only),
 * and r0-r1 when doing the byte swapping, so don't use these.
 * r3 is removed for softmmu to avoid clashes with helper arguments.
 */
#ifdef CONFIG_SOFTMMU
#define ALL_QLOAD_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
                          (1 << TCG_REG_R14)))
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \
                          ((TARGET_LONG_BITS == 64) << TCG_REG_R3)))
#else
#define ALL_QLOAD_REGS   ALL_GENERAL_REGS
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1)))
#endif

/*
 * ARM immediates for ALU instructions are made of an unsigned 8-bit
 * value right-rotated by an even amount between 0 and 30.
 *
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
 */
static int encode_imm(uint32_t imm)
{
    uint32_t rot, imm8;

    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {
        return imm;
    }

    /* Next, try a simple even shift.  */
    rot = ctz32(imm) & ~1;
    imm8 = imm >> rot;
    rot = 32 - rot;
    if ((imm8 & ~0xff) == 0) {
        goto found;
    }

    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {
            goto found;
        }
    }
    /* Fail: imm cannot be encoded. */
    return -1;

 found:
    /* Note that rot is even, and we discard bit 0 by shifting by 7. */
    return rot << 7 | imm8;
}
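
/*
 * Worked example: 0xff000000 has ctz32 == 24, so imm8 == 0xff with a
 * right-rotation of 32 - 24 == 8; the returned imm12 field is
 * (8 << 7) | 0xff == 0x4ff.  A value such as 0x00ff00ff has more than
 * 8 significant bits under every rotation and yields -1.
 */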

static int encode_imm_nofail(uint32_t imm)
{
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);
    return ret;
}

static bool check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate.  */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}
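
/*
 * Example: 0x2500 matches the second case, giving cmode == 0xa and
 * imm8 == 0x25, i.e. the NEON immediate 0x25 << 8 replicated into
 * every 16-bit lane.
 */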

/* Return true if v32 is a valid 32-bit shifted immediate.  */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifting ones immediate.  */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}
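
/*
 * Example: for v32 == 0x0000ff42 the loop masks out the byte at
 * bits 8..15 on the i == 2 iteration, leaving 0x42, which is_shimm32()
 * accepts with cmode == 0x0 and imm8 == 0x42.  The caller then emits
 * MOVI #0x42 followed by an ORR of 0xff << 8, extracted from v32 using
 * the returned cmode.
 */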

/* Return true if V is a valid 16-bit or 32-bit shifted immediate.  */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* ORRI and ANDI must not both be set for the same insn.  */
        g_assert_not_reached();
    }

    return 0;
}

static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | INSN_B |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, TCGReg rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    /*
     * Unless the C portion of QEMU is compiled as thumb, we don't need
     * true BX semantics; merely a branch to an address held in a register.
     */
    tcg_out_bx_reg(s, cond, rn);
}

static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
                          TCGReg rn, uint16_t mask)
{
    tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
                             TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static void __attribute__((unused))
tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
{
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    imm12 = encode_imm(arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
        return;
    }
    imm12 = encode_imm(~arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        imm12 = encode_imm(diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
            return;
        }
    } else {
        imm12 = encode_imm(-diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        int rot;

        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd,  0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}
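
/*
 * Worked example: for arg == 0x00ff00ff on pre-v7, ctpop32 == 16 keeps
 * ARITH_MOV; sh1 == 0 leaves tt1 == 0x00ff0000, sh2 == 16 leaves
 * tt2 == 0, so the emitted pair is "mov rd, #0xff" followed by
 * "eor rd, rd, #0x00ff0000".
 */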

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
 */
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint.
 */
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(~rhs);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(-rhs);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
                          TCGReg rn, TCGReg rm)
{
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_udiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_ext8s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* sxtb */
    tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
}

static void __attribute__((unused))
tcg_out_ext8u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static void tcg_out_ext16s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* sxth */
    tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* uxth */
    tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                            TCGReg rd, TCGReg rn, int flags)
{
    if (flags & TCG_BSWAP_OS) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        return;
    }

    /* rev16 */
    tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
    }
}
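
/*
 * Note: with TCG_BSWAP_OZ alone, the input's high half is not known to
 * be zero, so the rev16 (which swaps bytes within each halfword) is
 * followed by uxth to deliver a zero-extended result.  With
 * TCG_BSWAP_IZ | TCG_BSWAP_OZ, the high half was already zero and rev16
 * leaves it zero, so the uxth is skipped.
 */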

static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* rev */
    tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGReg rn, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
                             TCGReg rn, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
              | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st16(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st8(TCGContext *s, ARMCond cond,
                        TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}

/*
 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which
 * may be out of branch range.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);
        return;
    }

    /* LDR is interworking from v5t. */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/*
 * The call case is mostly used for helpers, so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (arm_mode) {
            tcg_out_bl_imm(s, COND_AL, disp);
        } else {
            tcg_out_blx_imm(s, disp);
        }
        return;
    }

    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, addr);
}

static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);
    }
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}
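
/*
 * Illustrative sequence: for TCG_COND_LT this emits "cmp al, bl" then
 * "sbcs tmp, ah, bh"; the resulting N and V flags are those of the full
 * 64-bit subtraction, so the caller can branch on the returned signed
 * condition directly.
 */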
1229
/*
 * Note that TCGReg references Q-registers.
 * D-regno = 2 * Q-regno, so shift left by 1 while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
{
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
}

static uint32_t encode_vn(TCGReg rn)
{
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
}

static uint32_t encode_vm(TCGReg rm)
{
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
}
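
/*
 * Worked example: encode_vd(TCG_REG_Q9) takes bit 3 of the Q number for
 * the insn's D bit (bit 22) and places (9 & 7) << 1 == 2 in the Vd field
 * at bit 12, i.e. D:Vd == 0b1:0010 == D18 == 2 * Q9.
 */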

static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg m)
{
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
              encode_vd(d) | encode_vm(m));
}

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
{
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
              encode_vd(d) | encode_vn(n) | encode_vm(m));
}

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
{
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
              | (cmode << 8) | extract32(imm8, 0, 4)
              | (extract32(imm8, 4, 3) << 16)
              | (extract32(imm8, 7, 1) << 24));
}

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
{
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
              (extract32(l_imm6, 6, 1) << 7) |
              (extract32(l_imm6, 0, 6) << 16));
}

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
{
    if (offset != 0) {
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
        }
        rn = TCG_REG_TMP;
    }
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
}

#ifdef CONFIG_SOFTMMU
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
#if HOST_BIG_ENDIAN
    [MO_UW] = helper_be_lduw_mmu,
    [MO_UL] = helper_be_ldul_mmu,
    [MO_UQ] = helper_be_ldq_mmu,
    [MO_SW] = helper_be_ldsw_mmu,
    [MO_SL] = helper_be_ldul_mmu,
#else
    [MO_UW] = helper_le_lduw_mmu,
    [MO_UL] = helper_le_ldul_mmu,
    [MO_UQ] = helper_le_ldq_mmu,
    [MO_SW] = helper_le_ldsw_mmu,
    [MO_SL] = helper_le_ldul_mmu,
#endif
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[MO_SIZE + 1] = {
    [MO_8]   = helper_ret_stb_mmu,
#if HOST_BIG_ENDIAN
    [MO_16] = helper_be_stw_mmu,
    [MO_32] = helper_be_stl_mmu,
    [MO_64] = helper_be_stq_mmu,
#else
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
#endif
};

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argregs 0..3 are real registers, 4+ go on the stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )

static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    if (argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}
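
/*
 * Example: with argreg == 1 the pair is bumped to r2/r3; with
 * argreg == 3 it is bumped past the registers to the stack, and if the
 * value already sits in an even/odd pair such as r4/r5, a single strd
 * stores both halves into the 8-aligned slot.
 */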

#define TLB_SHIFT   (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);

/* These offsets are built into the LDRD below.  */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);

/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP.  */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               MemOp opc, int mem_index, bool is_load)
{
    int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
    unsigned a_mask = (1 << get_alignment_bits(opc)) - 1;
    TCGReg t_addr;

    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}.  */
    tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

    /* Extract the tlb index from the address into R0.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                    SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));

    /*
     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
     */
    if (cmp_off == 0) {
        if (TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        } else {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        }
    } else {
        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
        if (TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        } else {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        }
    }

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                    offsetof(CPUTLBEntry, addend));

    /*
     * Check alignment, check comparators.
     * Do this in 2-4 insns.  Use MOVW for v7, if possible,
     * to reduce the number of sequential conditional instructions.
     * Almost all guests have at least 4k pages, which means that we need
     * to clear at least 9 bits even for an 8-byte access, which means it
     * isn't worth checking for an immediate operand for BIC.
     *
     * For unaligned accesses, test the page of the last unit of alignment.
     * This leaves the least significant alignment bits unchanged, and of
     * course must be zero.
     */
    t_addr = addrlo;
    if (a_mask < s_mask) {
        t_addr = TCG_REG_R0;
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
                        addrlo, s_mask - a_mask);
    }
    if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask));
        tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                        t_addr, TCG_REG_TMP, 0);
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
    } else {
        if (a_mask) {
            tcg_debug_assert(a_mask <= 0xff);
            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
        }
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
                        SHIFT_IMM_LSR(TARGET_PAGE_BITS));
        tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
                        0, TCG_REG_R2, TCG_REG_TMP,
                        SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    }

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
    }

    return TCG_REG_R1;
}
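
/*
 * Illustrative sketch (assuming a 32-bit guest load with cmp_off == 0,
 * pre-v7, and no alignment check), the routine above emits roughly:
 *     ldrd  r0, r1, [env, #fast_off]
 *     and   r0, r0, addrlo, lsr #(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)
 *     ldr   r2, [r1, r0]!
 *     ldr   r1, [r1, #offsetof(CPUTLBEntry, addend)]
 *     mov   tmp, addrlo, lsr #TARGET_PAGE_BITS
 *     cmp   r2, tmp, lsl #TARGET_PAGE_BITS
 * leaving EQ set on a TLB hit and the addend in r1.
 */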

/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                TCGReg addrhi, tcg_insn_unit *raddr,
                                tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Use the canonical unsigned helpers and minimize icache usage. */
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    default:
        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_UQ:
        if (datalo != TCG_REG_R1) {
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
        } else if (datahi != TCG_REG_R0) {
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        } else {
            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
        }
        break;
    }

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SIZE) {
    case MO_8:
        argreg = tcg_out_arg_reg8(s, argreg, datalo);
        break;
    case MO_16:
        argreg = tcg_out_arg_reg16(s, argreg, datalo);
        break;
    case MO_32:
    default:
        argreg = tcg_out_arg_reg32(s, argreg, datalo);
        break;
    case MO_64:
        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
    return true;
}
1602#else
1603
1604static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
1605                                   TCGReg addrhi, unsigned a_bits)
1606{
1607    unsigned a_mask = (1 << a_bits) - 1;
1608    TCGLabelQemuLdst *label = new_ldst_label(s);
1609
1610    label->is_ld = is_ld;
1611    label->addrlo_reg = addrlo;
1612    label->addrhi_reg = addrhi;
1613
1614    /* We are expecting a_bits to max out at 7, and can easily support 8. */
1615    tcg_debug_assert(a_mask <= 0xff);
1616    /* tst addr, #mask */
1617    tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
1618
1619    /* blne slow_path */
1620    label->label_ptr[0] = s->code_ptr;
1621    tcg_out_bl_imm(s, COND_NE, 0);
1622
1623    label->raddr = tcg_splitwx_to_rx(s->code_ptr);
1624}
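
/*
 * For example, with a_bits == 2 (a 4-byte-aligned access), a_mask == 3
 * and the sequence emitted above is
 *     tst   addrlo, #3
 *     blne  slow_path
 * so the slow path is entered only when either low address bit is set.
 */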
1625
1626static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
1627{
1628    if (!reloc_pc24(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1629        return false;
1630    }
1631
1632    if (TARGET_LONG_BITS == 64) {
1633        /* The 64-bit guest address is passed in the aligned pair R2:R3, per the AAPCS. */
1634        if (l->addrhi_reg != TCG_REG_R2) {
1635            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
1636            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
1637        } else if (l->addrlo_reg != TCG_REG_R3) {
1638            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
1639            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
1640        } else {
1641            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, TCG_REG_R2);
1642            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, TCG_REG_R3);
1643            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, TCG_REG_R1);
1644        }
1645    } else {
1646        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg);
1647    }
1648    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_AREG0);
1649
1650    /*
1651     * Tail call to the helper, with the return address back inline,
1652     * just for the clarity of the debugging traceback -- the helper
1653     * cannot return.  We have used BLNE to arrive here, so LR is
1654     * already set.
1655     */
1656    tcg_out_goto(s, COND_AL, (const void *)
1657                 (l->is_ld ? helper_unaligned_ld : helper_unaligned_st));
1658    return true;
1659}
1660
1661static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1662{
1663    return tcg_out_fail_alignment(s, l);
1664}
1665
1666static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1667{
1668    return tcg_out_fail_alignment(s, l);
1669}
1670#endif /* SOFTMMU */
1671
1672static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
1673                                  TCGReg datalo, TCGReg datahi,
1674                                  TCGReg addrlo, TCGReg addend,
1675                                  bool scratch_addend)
1676{
1677    /* Byte swapping is left to middle-end expansion. */
1678    tcg_debug_assert((opc & MO_BSWAP) == 0);
1679
1680    switch (opc & MO_SSIZE) {
1681    case MO_UB:
1682        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
1683        break;
1684    case MO_SB:
1685        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
1686        break;
1687    case MO_UW:
1688        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
1689        break;
1690    case MO_SW:
1691        tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
1692        break;
1693    case MO_UL:
1694        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
1695        break;
1696    case MO_UQ:
1697        /* LDRD requires alignment; double-check that. */
1698        if (get_alignment_bits(opc) >= MO_64
1699            && (datalo & 1) == 0 && datahi == datalo + 1) {
1700            /*
1701             * Rm (the second address op) must not overlap Rt or Rt + 1.
1702             * Since datalo is even, clearing bit 0 tests against Rt and Rt + 1 at once.
1703             * Flip the two address arguments if that works.
1704             */
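            /*
             * E.g. with datalo == r4, datahi == r5: LDRD is usable unless
             * addend is r4 or r5; failing that, the operands are swapped
             * unless addrlo is r4 or r5.
             */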
1705            if ((addend & ~1) != datalo) {
1706                tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
1707                break;
1708            }
1709            if ((addrlo & ~1) != datalo) {
1710                tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo);
1711                break;
1712            }
1713        }
1714        if (scratch_addend) {
1715            tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
1716            tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
1717        } else {
1718            tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
1719                            addend, addrlo, SHIFT_IMM_LSL(0));
1720            tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0);
1721            tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4);
1722        }
1723        break;
1724    default:
1725        g_assert_not_reached();
1726    }
1727}
1728
1729#ifndef CONFIG_SOFTMMU
1730static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
1731                                   TCGReg datahi, TCGReg addrlo)
1732{
1733    /* Byte swapping is left to middle-end expansion. */
1734    tcg_debug_assert((opc & MO_BSWAP) == 0);
1735
1736    switch (opc & MO_SSIZE) {
1737    case MO_UB:
1738        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
1739        break;
1740    case MO_SB:
1741        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
1742        break;
1743    case MO_UW:
1744        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
1745        break;
1746    case MO_SW:
1747        tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
1748        break;
1749    case MO_UL:
1750        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
1751        break;
1752    case MO_UQ:
1753        /* LDRD requires alignment; double-check that. */
1754        if (get_alignment_bits(opc) >= MO_64
1755            && (datalo & 1) == 0 && datahi == datalo + 1) {
1756            tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
1757        } else if (datalo == addrlo) {
1758            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
1759            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
1760        } else {
1761            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
1762            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
1763        }
1764        break;
1765    default:
1766        g_assert_not_reached();
1767    }
1768}
1769#endif
1770
1771static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
1772{
1773    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1774    MemOpIdx oi;
1775    MemOp opc;
1776#ifdef CONFIG_SOFTMMU
1777    int mem_index;
1778    TCGReg addend;
1779    tcg_insn_unit *label_ptr;
1780#else
1781    unsigned a_bits;
1782#endif
1783
1784    datalo = *args++;
1785    datahi = (is64 ? *args++ : 0);
1786    addrlo = *args++;
1787    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1788    oi = *args++;
1789    opc = get_memop(oi);
1790
1791#ifdef CONFIG_SOFTMMU
1792    mem_index = get_mmuidx(oi);
1793    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);
1794
1795    /* This is a conditional BL, used only to load a pointer within this opcode
1796       into LR for the slow path.  We will not be using the value for a tail call.  */
1797    label_ptr = s->code_ptr;
1798    tcg_out_bl_imm(s, COND_NE, 0);
1799
1800    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true);
1801
1802    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
1803                        s->code_ptr, label_ptr);
1804#else /* !CONFIG_SOFTMMU */
1805    a_bits = get_alignment_bits(opc);
1806    if (a_bits) {
1807        tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
1808    }
1809    if (guest_base) {
1810        tcg_out_qemu_ld_index(s, opc, datalo, datahi,
1811                              addrlo, TCG_REG_GUEST_BASE, false);
1812    } else {
1813        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
1814    }
1815#endif
1816}
1817
1818static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
1819                                  TCGReg datalo, TCGReg datahi,
1820                                  TCGReg addrlo, TCGReg addend,
1821                                  bool scratch_addend)
1822{
1823    /* Byte swapping is left to middle-end expansion. */
1824    tcg_debug_assert((opc & MO_BSWAP) == 0);
1825
1826    switch (opc & MO_SIZE) {
1827    case MO_8:
1828        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
1829        break;
1830    case MO_16:
1831        tcg_out_st16_r(s, cond, datalo, addrlo, addend);
1832        break;
1833    case MO_32:
1834        tcg_out_st32_r(s, cond, datalo, addrlo, addend);
1835        break;
1836    case MO_64:
1837        /* STRD requires alignment; double-check that. */
1838        if (get_alignment_bits(opc) >= MO_64
1839            && (datalo & 1) == 0 && datahi == datalo + 1) {
1840            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
1841        } else if (scratch_addend) {
1842            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
1843            tcg_out_st32_12(s, cond, datahi, addend, 4);
1844        } else {
1845            tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP,
1846                            addend, addrlo, SHIFT_IMM_LSL(0));
1847            tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0);
1848            tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4);
1849        }
1850        break;
1851    default:
1852        g_assert_not_reached();
1853    }
1854}
1855
1856#ifndef CONFIG_SOFTMMU
1857static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
1858                                   TCGReg datahi, TCGReg addrlo)
1859{
1860    /* Byte swapping is left to middle-end expansion. */
1861    tcg_debug_assert((opc & MO_BSWAP) == 0);
1862
1863    switch (opc & MO_SIZE) {
1864    case MO_8:
1865        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
1866        break;
1867    case MO_16:
1868        tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
1869        break;
1870    case MO_32:
1871        tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
1872        break;
1873    case MO_64:
1874        /* STRD requires alignment; double-check that. */
1875        if (get_alignment_bits(opc) >= MO_64
1876            && (datalo & 1) == 0 && datahi == datalo + 1) {
1877            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
1878        } else {
1879            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
1880            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
1881        }
1882        break;
1883    default:
1884        g_assert_not_reached();
1885    }
1886}
1887#endif
1888
1889static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
1890{
1891    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1892    MemOpIdx oi;
1893    MemOp opc;
1894#ifdef CONFIG_SOFTMMU
1895    int mem_index;
1896    TCGReg addend;
1897    tcg_insn_unit *label_ptr;
1898#else
1899    unsigned a_bits;
1900#endif
1901
1902    datalo = *args++;
1903    datahi = (is64 ? *args++ : 0);
1904    addrlo = *args++;
1905    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1906    oi = *args++;
1907    opc = get_memop(oi);
1908
1909#ifdef CONFIG_SOFTMMU
1910    mem_index = get_mmuidx(oi);
1911    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);
1912
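    /*
     * tcg_out_tlb_read leaves EQ set on a TLB hit, so the store below is
     * predicated on EQ; on a miss, the BLNE enters the slow path instead.
     */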
1913    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi,
1914                          addrlo, addend, true);
1915
1916    /* The conditional call must come last, as we're going to return here.  */
1917    label_ptr = s->code_ptr;
1918    tcg_out_bl_imm(s, COND_NE, 0);
1919
1920    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
1921                        s->code_ptr, label_ptr);
1922#else /* !CONFIG_SOFTMMU */
1923    a_bits = get_alignment_bits(opc);
1924    if (a_bits) {
1925        tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
1926    }
1927    if (guest_base) {
1928        tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi,
1929                              addrlo, TCG_REG_GUEST_BASE, false);
1930    } else {
1931        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
1932    }
1933#endif
1934}
1935
1936static void tcg_out_epilogue(TCGContext *s);
1937
1938static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
1939{
1940    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
1941    tcg_out_epilogue(s);
1942}
1943
1944static void tcg_out_goto_tb(TCGContext *s, int which)
1945{
1946    uintptr_t i_addr;
1947    intptr_t i_disp;
1948
1949    /* Direct branch will be patched by tb_target_set_jmp_target. */
1950    set_jmp_insn_offset(s, which);
1951    tcg_out32(s, INSN_NOP);
1952
1953    /* When branch is out of range, fall through to indirect. */
1954    i_addr = get_jmp_target_addr(s, which);
1955    i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
1956    tcg_debug_assert(i_disp < 0);
1957    if (i_disp >= -0xfff) {
1958        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
1959    } else {
1960        /*
1961         * The TB is close, but outside the 12 bits addressable by
1962         * the load.  We can extend this to 20 bits with a sub of a
1963         * shifted immediate from pc.
1964         */
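        /*
         * Worked example (illustrative values only): i_disp == -0x12468
         * gives h == 0x12468, l == 0x468, and h - l == 0x12000 is
         * encodable as a rotated 8-bit immediate, so we emit
         *     sub  r0, pc, #0x12000
         *     ldr  pc, [r0, #-0x468]
         * which loads from pc - 0x12468 as required.
         */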
1965        int h = -i_disp;
1966        int l = h & 0xfff;
1967
1968        h = encode_imm_nofail(h - l);
1969        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
1970        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, -l);
1971    }
1972    set_jmp_reset_offset(s, which);
1973}
1974
1975void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1976                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1977{
1978    uintptr_t addr = tb->jmp_target_addr[n];
1979    ptrdiff_t offset = addr - (jmp_rx + 8);
1980    tcg_insn_unit insn;
1981
1982    /* Either branch directly (B reaches +/- 32MB), or fall through to the indirect branch. */
1983    if (offset == sextract64(offset, 0, 26)) {
1984        /* B <addr> */
1985        insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
1986    } else {
1987        insn = INSN_NOP;
1988    }
1989
1990    qatomic_set((uint32_t *)jmp_rw, insn);
1991    flush_idcache_range(jmp_rx, jmp_rw, 4);
1992}
1993
1994static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1995                       const TCGArg args[TCG_MAX_OP_ARGS],
1996                       const int const_args[TCG_MAX_OP_ARGS])
1997{
1998    TCGArg a0, a1, a2, a3, a4, a5;
1999    int c;
2000
2001    switch (opc) {
2002    case INDEX_op_goto_ptr:
2003        tcg_out_b_reg(s, COND_AL, args[0]);
2004        break;
2005    case INDEX_op_br:
2006        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
2007        break;
2008
2009    case INDEX_op_ld8u_i32:
2010        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
2011        break;
2012    case INDEX_op_ld8s_i32:
2013        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
2014        break;
2015    case INDEX_op_ld16u_i32:
2016        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
2017        break;
2018    case INDEX_op_ld16s_i32:
2019        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
2020        break;
2021    case INDEX_op_ld_i32:
2022        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
2023        break;
2024    case INDEX_op_st8_i32:
2025        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
2026        break;
2027    case INDEX_op_st16_i32:
2028        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
2029        break;
2030    case INDEX_op_st_i32:
2031        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
2032        break;
2033
2034    case INDEX_op_movcond_i32:
2035        /* Constraints mean that v2 is always in the same register as dest,
2036         * so we only need to do "if condition passed, move v1 to dest".
2037         */
2038        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2039                        args[1], args[2], const_args[2]);
2040        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
2041                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
2042        break;
2043    case INDEX_op_add_i32:
2044        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
2045                        args[0], args[1], args[2], const_args[2]);
2046        break;
2047    case INDEX_op_sub_i32:
2048        if (const_args[1]) {
2049            if (const_args[2]) {
2050                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
2051            } else {
2052                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
2053                               args[0], args[2], args[1], 1);
2054            }
2055        } else {
2056            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
2057                            args[0], args[1], args[2], const_args[2]);
2058        }
2059        break;
2060    case INDEX_op_and_i32:
2061        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
2062                        args[0], args[1], args[2], const_args[2]);
2063        break;
2064    case INDEX_op_andc_i32:
2065        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
2066                        args[0], args[1], args[2], const_args[2]);
2067        break;
2068    case INDEX_op_or_i32:
2069        c = ARITH_ORR;
2070        goto gen_arith;
2071    case INDEX_op_xor_i32:
2072        c = ARITH_EOR;
2073        /* Fall through.  */
2074    gen_arith:
2075        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
2076        break;
2077    case INDEX_op_add2_i32:
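        /*
         * If the low-part destination overlaps a high-part input, build
         * the low part in TMP so the ADC below still sees its operands,
         * then move TMP into place at the end.
         */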
2078        a0 = args[0], a1 = args[1], a2 = args[2];
2079        a3 = args[3], a4 = args[4], a5 = args[5];
2080        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
2081            a0 = TCG_REG_TMP;
2082        }
2083        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
2084                        a0, a2, a4, const_args[4]);
2085        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
2086                        a1, a3, a5, const_args[5]);
2087        tcg_out_mov_reg(s, COND_AL, args[0], a0);
2088        break;
2089    case INDEX_op_sub2_i32:
2090        a0 = args[0], a1 = args[1], a2 = args[2];
2091        a3 = args[3], a4 = args[4], a5 = args[5];
2092        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
2093            a0 = TCG_REG_TMP;
2094        }
2095        if (const_args[2]) {
2096            if (const_args[4]) {
2097                tcg_out_movi32(s, COND_AL, a0, a4);
2098                a4 = a0;
2099            }
2100            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
2101        } else {
2102            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
2103                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
2104        }
2105        if (const_args[3]) {
2106            if (const_args[5]) {
2107                tcg_out_movi32(s, COND_AL, a1, a5);
2108                a5 = a1;
2109            }
2110            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
2111        } else {
2112            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
2113                            a1, a3, a5, const_args[5]);
2114        }
2115        tcg_out_mov_reg(s, COND_AL, args[0], a0);
2116        break;
2117    case INDEX_op_neg_i32:
2118        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
2119        break;
2120    case INDEX_op_not_i32:
2121        tcg_out_dat_reg(s, COND_AL,
2122                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
2123        break;
2124    case INDEX_op_mul_i32:
2125        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
2126        break;
2127    case INDEX_op_mulu2_i32:
2128        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
2129        break;
2130    case INDEX_op_muls2_i32:
2131        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
2132        break;
2133    /* XXX: Perhaps args[2] & 0x1f is wrong */
2134    case INDEX_op_shl_i32:
2135        c = const_args[2] ?
2136                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
2137        goto gen_shift32;
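    /*
     * In the ARM immediate shift encoding, an amount of 0 for LSR, ASR
     * and ROR means 32 (or RRX), so a zero TCG shift count must be
     * emitted as LSL #0 instead.
     */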
2138    case INDEX_op_shr_i32:
2139        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
2140                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
2141        goto gen_shift32;
2142    case INDEX_op_sar_i32:
2143        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
2144                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
2145        goto gen_shift32;
2146    case INDEX_op_rotr_i32:
2147        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
2148                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
2149        /* Fall through.  */
2150    gen_shift32:
2151        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
2152        break;
2153
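    /*
     * ARM has no rotate-left: rotl(x, n) is emitted as ror(x, 32 - n).
     * A rotation by 32 is an identity, so the register form computed
     * with RSB below is also correct for n == 0.
     */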
2154    case INDEX_op_rotl_i32:
2155        if (const_args[2]) {
2156            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
2157                            ((0x20 - args[2]) & 0x1f) ?
2158                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
2159                            SHIFT_IMM_LSL(0));
2160        } else {
2161            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
2162            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
2163                            SHIFT_REG_ROR(TCG_REG_TMP));
2164        }
2165        break;
2166
2167    case INDEX_op_ctz_i32:
2168        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
2169        a1 = TCG_REG_TMP;
2170        goto do_clz;
2171
2172    case INDEX_op_clz_i32:
2173        a1 = args[1];
2174    do_clz:
2175        a0 = args[0];
2176        a2 = args[2];
2177        c = const_args[2];
2178        if (c && a2 == 32) {
2179            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
2180            break;
2181        }
2182        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
2183        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
2184        if (c || a0 != a2) {
2185            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
2186        }
2187        break;
2188
2189    case INDEX_op_brcond_i32:
2190        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2191                       args[0], args[1], const_args[1]);
2192        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
2193                           arg_label(args[3]));
2194        break;
2195    case INDEX_op_setcond_i32:
2196        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2197                        args[1], args[2], const_args[2]);
2198        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
2199                        ARITH_MOV, args[0], 0, 1);
2200        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
2201                        ARITH_MOV, args[0], 0, 0);
2202        break;
2203
2204    case INDEX_op_brcond2_i32:
2205        c = tcg_out_cmp2(s, args, const_args);
2206        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
2207        break;
2208    case INDEX_op_setcond2_i32:
2209        c = tcg_out_cmp2(s, args + 1, const_args + 1);
2210        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
2211        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
2212                        ARITH_MOV, args[0], 0, 0);
2213        break;
2214
2215    case INDEX_op_qemu_ld_i32:
2216        tcg_out_qemu_ld(s, args, 0);
2217        break;
2218    case INDEX_op_qemu_ld_i64:
2219        tcg_out_qemu_ld(s, args, 1);
2220        break;
2221    case INDEX_op_qemu_st_i32:
2222        tcg_out_qemu_st(s, args, 0);
2223        break;
2224    case INDEX_op_qemu_st_i64:
2225        tcg_out_qemu_st(s, args, 1);
2226        break;
2227
2228    case INDEX_op_bswap16_i32:
2229        tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
2230        break;
2231    case INDEX_op_bswap32_i32:
2232        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
2233        break;
2234
2235    case INDEX_op_ext8s_i32:
2236        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
2237        break;
2238    case INDEX_op_ext16s_i32:
2239        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
2240        break;
2241    case INDEX_op_ext16u_i32:
2242        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
2243        break;
2244
2245    case INDEX_op_deposit_i32:
2246        tcg_out_deposit(s, COND_AL, args[0], args[2],
2247                        args[3], args[4], const_args[2]);
2248        break;
2249    case INDEX_op_extract_i32:
2250        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
2251        break;
2252    case INDEX_op_sextract_i32:
2253        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
2254        break;
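    /*
     * extract2 produces 32 bits of the concatenation args[2]:args[1]
     * starting at bit args[3], i.e.
     *     dest = (args[1] >> args[3]) | (args[2] << (32 - args[3]))
     * which maps directly onto the LSR/LSL/ORR forms below.
     */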
2255    case INDEX_op_extract2_i32:
2256        /* ??? These optimizations vs zero should be generic.  */
2257        /* ??? But we can't substitute 2 for 1 in the opcode stream yet.  */
2258        if (const_args[1]) {
2259            if (const_args[2]) {
2260                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
2261            } else {
2262                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2263                                args[2], SHIFT_IMM_LSL(32 - args[3]));
2264            }
2265        } else if (const_args[2]) {
2266            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2267                            args[1], SHIFT_IMM_LSR(args[3]));
2268        } else {
2269            /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
2270            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
2271                            args[2], SHIFT_IMM_LSL(32 - args[3]));
2272            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
2273                            args[1], SHIFT_IMM_LSR(args[3]));
2274        }
2275        break;
2276
2277    case INDEX_op_div_i32:
2278        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
2279        break;
2280    case INDEX_op_divu_i32:
2281        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
2282        break;
2283
2284    case INDEX_op_mb:
2285        tcg_out_mb(s, args[0]);
2286        break;
2287
2288    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
2289    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2290    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2291    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2292    default:
2293        tcg_abort();
2294    }
2295}
2296
2297static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2298{
2299    switch (op) {
2300    case INDEX_op_goto_ptr:
2301        return C_O0_I1(r);
2302
2303    case INDEX_op_ld8u_i32:
2304    case INDEX_op_ld8s_i32:
2305    case INDEX_op_ld16u_i32:
2306    case INDEX_op_ld16s_i32:
2307    case INDEX_op_ld_i32:
2308    case INDEX_op_neg_i32:
2309    case INDEX_op_not_i32:
2310    case INDEX_op_bswap16_i32:
2311    case INDEX_op_bswap32_i32:
2312    case INDEX_op_ext8s_i32:
2313    case INDEX_op_ext16s_i32:
2314    case INDEX_op_ext16u_i32:
2315    case INDEX_op_extract_i32:
2316    case INDEX_op_sextract_i32:
2317        return C_O1_I1(r, r);
2318
2319    case INDEX_op_st8_i32:
2320    case INDEX_op_st16_i32:
2321    case INDEX_op_st_i32:
2322        return C_O0_I2(r, r);
2323
2324    case INDEX_op_add_i32:
2325    case INDEX_op_sub_i32:
2326    case INDEX_op_setcond_i32:
2327        return C_O1_I2(r, r, rIN);
2328
2329    case INDEX_op_and_i32:
2330    case INDEX_op_andc_i32:
2331    case INDEX_op_clz_i32:
2332    case INDEX_op_ctz_i32:
2333        return C_O1_I2(r, r, rIK);
2334
2335    case INDEX_op_mul_i32:
2336    case INDEX_op_div_i32:
2337    case INDEX_op_divu_i32:
2338        return C_O1_I2(r, r, r);
2339
2340    case INDEX_op_mulu2_i32:
2341    case INDEX_op_muls2_i32:
2342        return C_O2_I2(r, r, r, r);
2343
2344    case INDEX_op_or_i32:
2345    case INDEX_op_xor_i32:
2346        return C_O1_I2(r, r, rI);
2347
2348    case INDEX_op_shl_i32:
2349    case INDEX_op_shr_i32:
2350    case INDEX_op_sar_i32:
2351    case INDEX_op_rotl_i32:
2352    case INDEX_op_rotr_i32:
2353        return C_O1_I2(r, r, ri);
2354
2355    case INDEX_op_brcond_i32:
2356        return C_O0_I2(r, rIN);
2357    case INDEX_op_deposit_i32:
2358        return C_O1_I2(r, 0, rZ);
2359    case INDEX_op_extract2_i32:
2360        return C_O1_I2(r, rZ, rZ);
2361    case INDEX_op_movcond_i32:
2362        return C_O1_I4(r, r, rIN, rIK, 0);
2363    case INDEX_op_add2_i32:
2364        return C_O2_I4(r, r, r, r, rIN, rIK);
2365    case INDEX_op_sub2_i32:
2366        return C_O2_I4(r, r, rI, rI, rIN, rIK);
2367    case INDEX_op_brcond2_i32:
2368        return C_O0_I4(r, r, rI, rI);
2369    case INDEX_op_setcond2_i32:
2370        return C_O1_I4(r, r, r, rI, rI);
2371
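    /*
     * The 'l' and 's' constraints keep the address and data operands
     * out of the registers clobbered by the TLB lookup and the helper
     * call arguments (ALL_QLOAD_REGS / ALL_QSTORE_REGS, defined earlier
     * in this file).
     */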
2372    case INDEX_op_qemu_ld_i32:
2373        return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l);
2374    case INDEX_op_qemu_ld_i64:
2375        return TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, l) : C_O2_I2(r, r, l, l);
2376    case INDEX_op_qemu_st_i32:
2377        return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s);
2378    case INDEX_op_qemu_st_i64:
2379        return TARGET_LONG_BITS == 32 ? C_O0_I3(s, s, s) : C_O0_I4(s, s, s, s);
2380
2381    case INDEX_op_st_vec:
2382        return C_O0_I2(w, r);
2383    case INDEX_op_ld_vec:
2384    case INDEX_op_dupm_vec:
2385        return C_O1_I1(w, r);
2386    case INDEX_op_dup_vec:
2387        return C_O1_I1(w, wr);
2388    case INDEX_op_abs_vec:
2389    case INDEX_op_neg_vec:
2390    case INDEX_op_not_vec:
2391    case INDEX_op_shli_vec:
2392    case INDEX_op_shri_vec:
2393    case INDEX_op_sari_vec:
2394        return C_O1_I1(w, w);
2395    case INDEX_op_dup2_vec:
2396    case INDEX_op_add_vec:
2397    case INDEX_op_mul_vec:
2398    case INDEX_op_smax_vec:
2399    case INDEX_op_smin_vec:
2400    case INDEX_op_ssadd_vec:
2401    case INDEX_op_sssub_vec:
2402    case INDEX_op_sub_vec:
2403    case INDEX_op_umax_vec:
2404    case INDEX_op_umin_vec:
2405    case INDEX_op_usadd_vec:
2406    case INDEX_op_ussub_vec:
2407    case INDEX_op_xor_vec:
2408    case INDEX_op_arm_sshl_vec:
2409    case INDEX_op_arm_ushl_vec:
2410        return C_O1_I2(w, w, w);
2411    case INDEX_op_arm_sli_vec:
2412        return C_O1_I2(w, 0, w);
2413    case INDEX_op_or_vec:
2414    case INDEX_op_andc_vec:
2415        return C_O1_I2(w, w, wO);
2416    case INDEX_op_and_vec:
2417    case INDEX_op_orc_vec:
2418        return C_O1_I2(w, w, wV);
2419    case INDEX_op_cmp_vec:
2420        return C_O1_I2(w, w, wZ);
2421    case INDEX_op_bitsel_vec:
2422        return C_O1_I3(w, w, w, w);
2423    default:
2424        g_assert_not_reached();
2425    }
2426}
2427
2428static void tcg_target_init(TCGContext *s)
2429{
2430    /*
2431     * Only probe for the platform and capabilities if we haven't already
2432     * determined maximum values at compile time.
2433     */
2434#if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
2435    {
2436        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2437#ifndef use_idiv_instructions
2438        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
2439#endif
2440#ifndef use_neon_instructions
2441        use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
2442#endif
2443    }
2444#endif
2445
2446    if (__ARM_ARCH < 7) {
2447        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
2448        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
2449            arm_arch = pl[1] - '0';
2450        }
2451
2452        if (arm_arch < 6) {
2453            error_report("TCG: ARMv%d is unsupported; exiting", arm_arch);
2454            exit(EXIT_FAILURE);
2455        }
2456    }
2457
2458    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2459
2460    tcg_target_call_clobber_regs = 0;
2461    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2462    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2463    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2464    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2465    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
2466    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2467
2468    if (use_neon_instructions) {
2469        tcg_target_available_regs[TCG_TYPE_V64]  = ALL_VECTOR_REGS;
2470        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2471
2472        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
2473        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
2474        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
2475        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
2476        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
2477        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
2478        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
2479        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
2480        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
2481        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
2482        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
2483        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
2484    }
2485
2486    s->reserved_regs = 0;
2487    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2488    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
2489    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
2490    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
2491}
2492
2493static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
2494                       TCGReg arg1, intptr_t arg2)
2495{
2496    switch (type) {
2497    case TCG_TYPE_I32:
2498        tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
2499        return;
2500    case TCG_TYPE_V64:
2501        /* regs 1; size 8; align 8 */
2502        tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
2503        return;
2504    case TCG_TYPE_V128:
2505        /*
2506         * We have only 8-byte alignment for the stack per the ABI.
2507         * Rather than dynamically re-align the stack, it's easier
2508         * to simply not request alignment beyond that.  So:
2509         * regs 2; size 8; align 8
2510         */
2511        tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2);
2512        return;
2513    default:
2514        g_assert_not_reached();
2515    }
2516}
2517
2518static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
2519                       TCGReg arg1, intptr_t arg2)
2520{
2521    switch (type) {
2522    case TCG_TYPE_I32:
2523        tcg_out_st32(s, COND_AL, arg, arg1, arg2);
2524        return;
2525    case TCG_TYPE_V64:
2526        /* regs 1; size 8; align 8 */
2527        tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
2528        return;
2529    case TCG_TYPE_V128:
2530        /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */
2531        tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2);
2532        return;
2533    default:
2534        g_assert_not_reached();
2535    }
2536}
2537
2538static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
2539                        TCGReg base, intptr_t ofs)
2540{
2541    return false;
2542}
2543
2544static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
2545{
2546    if (ret == arg) {
2547        return true;
2548    }
2549    switch (type) {
2550    case TCG_TYPE_I32:
2551        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
2552            tcg_out_mov_reg(s, COND_AL, ret, arg);
2553            return true;
2554        }
2555        return false;
2556
2557    case TCG_TYPE_V64:
2558    case TCG_TYPE_V128:
2559        /* "VMOV D,N" is an alias for "VORR D,N,N". */
2560        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
2561        return true;
2562
2563    default:
2564        g_assert_not_reached();
2565    }
2566}
2567
2568static void tcg_out_movi(TCGContext *s, TCGType type,
2569                         TCGReg ret, tcg_target_long arg)
2570{
2571    tcg_debug_assert(type == TCG_TYPE_I32);
2572    tcg_debug_assert(ret < TCG_REG_Q0);
2573    tcg_out_movi32(s, COND_AL, ret, arg);
2574}
2575
2576/* Type is always V128, with I64 elements.  */
2577static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
2578{
2579    /* Move high element into place first. */
2580    /* VMOV Dd+1, Ds */
2581    tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
2582    /* Move low element into place; tcg_out_mov will check for nop. */
2583    tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
2584}
2585
2586static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2587                            TCGReg rd, TCGReg rs)
2588{
2589    int q = type - TCG_TYPE_V64;
2590
2591    if (vece == MO_64) {
2592        if (type == TCG_TYPE_V128) {
2593            tcg_out_dup2_vec(s, rd, rs, rs);
2594        } else {
2595            tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
2596        }
2597    } else if (rs < TCG_REG_Q0) {
2598        int b = (vece == MO_8);
2599        int e = (vece == MO_16);
2600        tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
2601                  encode_vn(rd) | (rs << 12));
2602    } else {
2603        int imm4 = 1 << vece;
2604        tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
2605                  encode_vd(rd) | encode_vm(rs));
2606    }
2607    return true;
2608}
2609
2610static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2611                             TCGReg rd, TCGReg base, intptr_t offset)
2612{
2613    if (vece == MO_64) {
2614        tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
2615        if (type == TCG_TYPE_V128) {
2616            tcg_out_dup2_vec(s, rd, rd, rd);
2617        }
2618    } else {
2619        int q = type - TCG_TYPE_V64;
2620        tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
2621                      rd, base, offset);
2622    }
2623    return true;
2624}
2625
2626static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2627                             TCGReg rd, int64_t v64)
2628{
2629    int q = type - TCG_TYPE_V64;
2630    int cmode, imm8, i;
2631
2632    /* Test all bytes equal first.  */
2633    if (vece == MO_8) {
2634        tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
2635        return;
2636    }
2637
2638    /*
2639     * Test all bytes 0x00 or 0xff second.  This can match cases that
2640     * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
2641     */
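    /*
     * For example, v64 == 0x00ff00ff00ff00ff sets bits 0, 2, 4 and 6,
     * giving imm8 == 0x55 for the single VMOV below.
     */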
2642    for (i = imm8 = 0; i < 8; i++) {
2643        uint8_t byte = v64 >> (i * 8);
2644        if (byte == 0xff) {
2645            imm8 |= 1 << i;
2646        } else if (byte != 0) {
2647            goto fail_bytes;
2648        }
2649    }
2650    tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
2651    return;
2652 fail_bytes:
2653
2654    /*
2655     * Tests for various replications.  For each element width, if we
2656     * cannot find an expansion there's no point checking a larger
2657     * width because we already know by replication it cannot match.
2658     */
2659    if (vece == MO_16) {
2660        uint16_t v16 = v64;
2661
2662        if (is_shimm16(v16, &cmode, &imm8)) {
2663            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2664            return;
2665        }
2666        if (is_shimm16(~v16, &cmode, &imm8)) {
2667            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2668            return;
2669        }
2670
2671        /*
2672         * Otherwise, all remaining constants can be loaded in two insns:
2673         * rd = v16 & 0xff, rd |= v16 & 0xff00.
2674         */
2675        tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
2676        tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8);   /* VORRI */
2677        return;
2678    }
2679
2680    if (vece == MO_32) {
2681        uint32_t v32 = v64;
2682
2683        if (is_shimm32(v32, &cmode, &imm8) ||
2684            is_soimm32(v32, &cmode, &imm8)) {
2685            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2686            return;
2687        }
2688        if (is_shimm32(~v32, &cmode, &imm8) ||
2689            is_soimm32(~v32, &cmode, &imm8)) {
2690            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2691            return;
2692        }
2693
2694        /*
2695         * Restrict the set of constants to those we can load with
2696         * two instructions.  Others we load from the pool.
2697         */
2698        i = is_shimm32_pair(v32, &cmode, &imm8);
2699        if (i) {
2700            tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2701            tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
2702            return;
2703        }
2704        i = is_shimm32_pair(~v32, &cmode, &imm8);
2705        if (i) {
2706            tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2707            tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
2708            return;
2709        }
2710    }
2711
2712    /*
2713     * As a last resort, load from the constant pool.
2714     */
2715    if (!q || vece == MO_64) {
2716        new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
2717        /* VLDR Dd, [pc + offset] */
2718        tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
2719        if (q) {
2720            tcg_out_dup2_vec(s, rd, rd, rd);
2721        }
2722    } else {
2723        new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
2724        /* add tmp, pc, offset */
2725        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
2726        tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
2727    }
2728}
2729
2730static const ARMInsn vec_cmp_insn[16] = {
2731    [TCG_COND_EQ] = INSN_VCEQ,
2732    [TCG_COND_GT] = INSN_VCGT,
2733    [TCG_COND_GE] = INSN_VCGE,
2734    [TCG_COND_GTU] = INSN_VCGT_U,
2735    [TCG_COND_GEU] = INSN_VCGE_U,
2736};
2737
2738static const ARMInsn vec_cmp0_insn[16] = {
2739    [TCG_COND_EQ] = INSN_VCEQ0,
2740    [TCG_COND_GT] = INSN_VCGT0,
2741    [TCG_COND_GE] = INSN_VCGE0,
2742    [TCG_COND_LT] = INSN_VCLT0,
2743    [TCG_COND_LE] = INSN_VCLE0,
2744};
2745
2746static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2747                           unsigned vecl, unsigned vece,
2748                           const TCGArg args[TCG_MAX_OP_ARGS],
2749                           const int const_args[TCG_MAX_OP_ARGS])
2750{
2751    TCGType type = vecl + TCG_TYPE_V64;
2752    unsigned q = vecl;
2753    TCGArg a0, a1, a2, a3;
2754    int cmode, imm8;
2755
2756    a0 = args[0];
2757    a1 = args[1];
2758    a2 = args[2];
2759
2760    switch (opc) {
2761    case INDEX_op_ld_vec:
2762        tcg_out_ld(s, type, a0, a1, a2);
2763        return;
2764    case INDEX_op_st_vec:
2765        tcg_out_st(s, type, a0, a1, a2);
2766        return;
2767    case INDEX_op_dupm_vec:
2768        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2769        return;
2770    case INDEX_op_dup2_vec:
2771        tcg_out_dup2_vec(s, a0, a1, a2);
2772        return;
2773    case INDEX_op_abs_vec:
2774        tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
2775        return;
2776    case INDEX_op_neg_vec:
2777        tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
2778        return;
2779    case INDEX_op_not_vec:
2780        tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
2781        return;
2782    case INDEX_op_add_vec:
2783        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
2784        return;
2785    case INDEX_op_mul_vec:
2786        tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
2787        return;
2788    case INDEX_op_smax_vec:
2789        tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
2790        return;
2791    case INDEX_op_smin_vec:
2792        tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
2793        return;
2794    case INDEX_op_sub_vec:
2795        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
2796        return;
2797    case INDEX_op_ssadd_vec:
2798        tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
2799        return;
2800    case INDEX_op_sssub_vec:
2801        tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
2802        return;
2803    case INDEX_op_umax_vec:
2804        tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
2805        return;
2806    case INDEX_op_umin_vec:
2807        tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
2808        return;
2809    case INDEX_op_usadd_vec:
2810        tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
2811        return;
2812    case INDEX_op_ussub_vec:
2813        tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
2814        return;
2815    case INDEX_op_xor_vec:
2816        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
2817        return;
2818    case INDEX_op_arm_sshl_vec:
2819        /*
2820         * Note that Vm is the data and Vn is the shift count,
2821         * therefore the arguments appear reversed.
2822         */
2823        tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
2824        return;
2825    case INDEX_op_arm_ushl_vec:
2826        /* See above. */
2827        tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
2828        return;
2829    case INDEX_op_shli_vec:
2830        tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
2831        return;
2832    case INDEX_op_shri_vec:
2833        tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
2834        return;
2835    case INDEX_op_sari_vec:
2836        tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
2837        return;
2838    case INDEX_op_arm_sli_vec:
2839        tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
2840        return;
2841
2842    case INDEX_op_andc_vec:
2843        if (!const_args[2]) {
2844            tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
2845            return;
2846        }
2847        a2 = ~a2;
2848        /* fall through */
2849    case INDEX_op_and_vec:
2850        if (const_args[2]) {
2851            is_shimm1632(~a2, &cmode, &imm8);
2852            if (a0 == a1) {
2853                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
2854                return;
2855            }
2856            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
2857            a2 = a0;
2858        }
2859        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
2860        return;
2861
2862    case INDEX_op_orc_vec:
2863        if (!const_args[2]) {
2864            tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
2865            return;
2866        }
2867        a2 = ~a2;
2868        /* fall through */
2869    case INDEX_op_or_vec:
2870        if (const_args[2]) {
2871            is_shimm1632(a2, &cmode, &imm8);
2872            if (a0 == a1) {
2873                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
2874                return;
2875            }
2876            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
2877            a2 = a0;
2878        }
2879        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
2880        return;
2881
2882    case INDEX_op_cmp_vec:
2883        {
2884            TCGCond cond = args[3];
2885
2886            if (cond == TCG_COND_NE) {
2887                if (const_args[2]) {
2888                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
2889                } else {
2890                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
2891                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
2892                }
2893            } else {
2894                ARMInsn insn;
2895
2896                if (const_args[2]) {
2897                    insn = vec_cmp0_insn[cond];
2898                    if (insn) {
2899                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
2900                        return;
2901                    }
2902                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
2903                    a2 = TCG_VEC_TMP;
2904                }
2905                insn = vec_cmp_insn[cond];
2906                if (insn == 0) {
2907                    TCGArg t;
2908                    t = a1, a1 = a2, a2 = t;
2909                    cond = tcg_swap_cond(cond);
2910                    insn = vec_cmp_insn[cond];
2911                    tcg_debug_assert(insn != 0);
2912                }
2913                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
2914            }
2915        }
2916        return;
2917
2918    case INDEX_op_bitsel_vec:
2919        a3 = args[3];
2920        if (a0 == a3) {
2921            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
2922        } else if (a0 == a2) {
2923            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
2924        } else {
2925            tcg_out_mov(s, type, a0, a1);
2926            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
2927        }
2928        return;
2929
2930    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
2931    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
2932    default:
2933        g_assert_not_reached();
2934    }
2935}
2936
2937int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2938{
2939    switch (opc) {
2940    case INDEX_op_add_vec:
2941    case INDEX_op_sub_vec:
2942    case INDEX_op_and_vec:
2943    case INDEX_op_andc_vec:
2944    case INDEX_op_or_vec:
2945    case INDEX_op_orc_vec:
2946    case INDEX_op_xor_vec:
2947    case INDEX_op_not_vec:
2948    case INDEX_op_shli_vec:
2949    case INDEX_op_shri_vec:
2950    case INDEX_op_sari_vec:
2951    case INDEX_op_ssadd_vec:
2952    case INDEX_op_sssub_vec:
2953    case INDEX_op_usadd_vec:
2954    case INDEX_op_ussub_vec:
2955    case INDEX_op_bitsel_vec:
2956        return 1;
2957    case INDEX_op_abs_vec:
2958    case INDEX_op_cmp_vec:
2959    case INDEX_op_mul_vec:
2960    case INDEX_op_neg_vec:
2961    case INDEX_op_smax_vec:
2962    case INDEX_op_smin_vec:
2963    case INDEX_op_umax_vec:
2964    case INDEX_op_umin_vec:
2965        return vece < MO_64;
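    /*
     * Returning -1 indicates support by expansion;
     * see tcg_expand_vec_op below.
     */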
2966    case INDEX_op_shlv_vec:
2967    case INDEX_op_shrv_vec:
2968    case INDEX_op_sarv_vec:
2969    case INDEX_op_rotli_vec:
2970    case INDEX_op_rotlv_vec:
2971    case INDEX_op_rotrv_vec:
2972        return -1;
2973    default:
2974        return 0;
2975    }
2976}
2977
2978void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2979                       TCGArg a0, ...)
2980{
2981    va_list va;
2982    TCGv_vec v0, v1, v2, t1, t2, c1;
2983    TCGArg a2;
2984
2985    va_start(va, a0);
2986    v0 = temp_tcgv_vec(arg_temp(a0));
2987    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
2988    a2 = va_arg(va, TCGArg);
2989    va_end(va);
2990
2991    switch (opc) {
2992    case INDEX_op_shlv_vec:
2993        /*
2994         * Merely propagate shlv_vec to arm_ushl_vec.
2995         * In this way we don't set TCG_TARGET_HAS_shv_vec
2996         * because everything is done via expansion.
2997         */
2998        v2 = temp_tcgv_vec(arg_temp(a2));
2999        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
3000                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3001        break;
3002
3003    case INDEX_op_shrv_vec:
3004    case INDEX_op_sarv_vec:
3005        /* Right shifts are negative left shifts for NEON.  */
3006        v2 = temp_tcgv_vec(arg_temp(a2));
3007        t1 = tcg_temp_new_vec(type);
3008        tcg_gen_neg_vec(vece, t1, v2);
3009        if (opc == INDEX_op_shrv_vec) {
3010            opc = INDEX_op_arm_ushl_vec;
3011        } else {
3012            opc = INDEX_op_arm_sshl_vec;
3013        }
3014        vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
3015                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3016        tcg_temp_free_vec(t1);
3017        break;
3018
3019    case INDEX_op_rotli_vec:
3020        t1 = tcg_temp_new_vec(type);
3021        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
3022        vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
3023                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
3024        tcg_temp_free_vec(t1);
3025        break;
3026
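    /*
     * rotlv: rotl(x, n) == (x << n) | (x >> (width - n)); the right
     * shift is performed as a left shift by the negative count n - width.
     */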
3027    case INDEX_op_rotlv_vec:
3028        v2 = temp_tcgv_vec(arg_temp(a2));
3029        t1 = tcg_temp_new_vec(type);
3030        c1 = tcg_constant_vec(type, vece, 8 << vece);
3031        tcg_gen_sub_vec(vece, t1, v2, c1);
3032        /* Right shifts are negative left shifts for NEON.  */
3033        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
3034                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3035        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
3036                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3037        tcg_gen_or_vec(vece, v0, v0, t1);
3038        tcg_temp_free_vec(t1);
3039        break;
3040
3041    case INDEX_op_rotrv_vec:
3042        v2 = temp_tcgv_vec(arg_temp(a2));
3043        t1 = tcg_temp_new_vec(type);
3044        t2 = tcg_temp_new_vec(type);
3045        c1 = tcg_constant_vec(type, vece, 8 << vece);
3046        tcg_gen_neg_vec(vece, t1, v2);
3047        tcg_gen_sub_vec(vece, t2, c1, v2);
3048        /* Right shifts are negative left shifts for NEON.  */
3049        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
3050                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3051        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
3052                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
3053        tcg_gen_or_vec(vece, v0, t1, t2);
3054        tcg_temp_free_vec(t1);
3055        tcg_temp_free_vec(t2);
3056        break;
3057
3058    default:
3059        g_assert_not_reached();
3060    }
3061}
3062
3063static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3064{
3065    int i;
3066    for (i = 0; i < count; ++i) {
3067        p[i] = INSN_NOP;
3068    }
3069}
3070
3071/* Compute frame size via macros, to share between tcg_target_qemu_prologue
3072   and tcg_register_jit.  */
3073
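/* r4-r11 (11 - 4 + 1 registers) plus lr: 9 words, i.e. 36 bytes. */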
3074#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
3075
3076#define FRAME_SIZE \
3077    ((PUSH_SIZE \
3078      + TCG_STATIC_CALL_ARGS_SIZE \
3079      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
3080      + TCG_TARGET_STACK_ALIGN - 1) \
3081     & -TCG_TARGET_STACK_ALIGN)
3082
3083#define STACK_ADDEND  (FRAME_SIZE - PUSH_SIZE)
3084
3085static void tcg_target_qemu_prologue(TCGContext *s)
3086{
3087    /* Calling convention requires us to save r4-r11 and lr.  */
3088    /* stmdb sp!, { r4 - r11, lr } */
3089    tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
3090                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
3091                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
3092                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));
3093
3094    /* Reserve callee argument and tcg temp space.  */
3095    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
3096                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3097    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
3098                  CPU_TEMP_BUF_NLONGS * sizeof(long));
3099
3100    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3101
3102#ifndef CONFIG_SOFTMMU
3103    if (guest_base) {
3104        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
3105        tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
3106    }
3107#endif
3108
3109    tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
3110
3111    /*
3112     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
3113     * and fall through to the rest of the epilogue.
3114     */
3115    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
3116    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
3117    tcg_out_epilogue(s);
3118}
3119
3120static void tcg_out_epilogue(TCGContext *s)
3121{
3122    /* Release local stack frame.  */
3123    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
3124                   TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3125
3126    /* ldmia sp!, { r4 - r11, pc } */
3127    tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
3128                  (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
3129                  (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
3130                  (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
3131}
3132
3133typedef struct {
3134    DebugFrameHeader h;
3135    uint8_t fde_def_cfa[4];
3136    uint8_t fde_reg_ofs[18];
3137} DebugFrame;
3138
3139#define ELF_HOST_MACHINE EM_ARM
3140
3141/* We're expecting a 2-byte uleb128 encoded value.  */
3142QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
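/*
 * For example, FRAME_SIZE == 0x250 would be encoded in fde_def_cfa as
 * the uleb128 bytes 0xd0, 0x04 (an illustrative value, not the actual
 * frame size).
 */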
3143
3144static const DebugFrame debug_frame = {
3145    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3146    .h.cie.id = -1,
3147    .h.cie.version = 1,
3148    .h.cie.code_align = 1,
3149    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
3150    .h.cie.return_column = 14,
3151
3152    /* Total FDE size does not include the "len" member.  */
3153    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3154
3155    .fde_def_cfa = {
3156        12, 13,                         /* DW_CFA_def_cfa sp, ... */
3157        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
3158        (FRAME_SIZE >> 7)
3159    },
3160    .fde_reg_ofs = {
3161        /* The following must match the stmdb in the prologue.  */
3162        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
3163        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
3164        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
3165        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
3166        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
3167        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
3168        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
3169        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
3170        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
3171    }
3172};
3173
3174void tcg_register_jit(const void *buf, size_t buf_size)
3175{
3176    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3177}
3178