/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "tcg-pool.inc.c"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif

/* ??? Ought to think about changing CONFIG_SOFTMMU to always be defined.  */
#ifdef CONFIG_SOFTMMU
# define USING_SOFTMMU 1
#else
# define USING_SOFTMMU 0
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%r2",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%pc",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

#define TCG_REG_TMP  TCG_REG_R12

enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)	(((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)	(((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)	(((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)	(((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)	(((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)	(((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)	(((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)	(((rs) << 8) | 0x70)
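
/*
 * Illustrative note: a data-processing insn takes one of the shift forms
 * above as its operand-2 field.  For example, a plain register move is
 * encoded as MOV with a zero-count LSL (as tcg_out_mov_reg() below does):
 *
 *     tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
 */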

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static inline bool reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;
    if (offset == sextract32(offset, 0, 24)) {
        *code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff);
        return true;
    }
    return false;
}
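
/*
 * Worked example (illustrative): patching a B/BL at code_ptr = 0x1000 to
 * target = 0x2000 gives a byte difference of 0x1000; subtracting the 8-byte
 * pc read-ahead and shifting right by 2 yields the word offset 0x3fe, which
 * fits in the signed 24-bit field and is OR-ed into the insn's low bits.
 */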

static inline bool reloc_pc13(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = tcg_ptr_byte_diff(target, code_ptr) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *code_ptr;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *code_ptr = insn;
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);

    if (type == R_ARM_PC24) {
        return reloc_pc24(code_ptr, (tcg_insn_unit *)value);
    } else if (type == R_ARM_PC13) {
        return reloc_pc13(code_ptr, (tcg_insn_unit *)value);
    } else {
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800

/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_INV;
        break;
    case 'N': /* The gcc constraint letter is L, already used here.  */
        ct->ct |= TCG_CT_CONST_NEG;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
#ifdef CONFIG_SOFTMMU
        /* r0-r3,lr will be overwritten when reading the tlb entry,
           so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    /* qemu_st address & data */
    case 's':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        /* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
           and r0-r1 doing the byte swapping, so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if defined(CONFIG_SOFTMMU)
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#if TARGET_LONG_BITS == 64
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    default:
        return NULL;
    }
    return ct_str;
}

static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   value right-rotated by an even amount between 0 and 30. */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}
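
/*
 * Worked example (illustrative): encode_imm(0x3fc00).  The value is
 * 0xff << 10, so ctz32 gives shift = 10 and the function returns
 * 32 - 10 = 22.  The caller then emits rotl(0x3fc00, 22) = 0xff as the
 * 8-bit immediate plus (22 << 7), which places 11 in the 4-bit rotate
 * field, i.e. rotate-right by 22.
 */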

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else {
        return 0;
    }
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, INSN_NOP);
}

static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn)
{
    /* Unless the C portion of QEMU is compiled as thumb, we don't
       actually need true BX semantics; merely a branch to an address
       held in a register.  */
    if (use_armv5t_instructions) {
        tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
    } else {
        tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                             TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}

static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
{
    int rot, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    rot = encode_imm(arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                        rotl(arg, rot) | (rot << 7));
        return;
    }
    rot = encode_imm(~arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                        rotl(~arg, rot) | (rot << 7));
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = arg - ((intptr_t)s->code_ptr + 8);
    if (diff >= 0) {
        rot = encode_imm(diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC,
                            rotl(diff, rot) | (rot << 7));
            return;
        }
    } else {
        rot = encode_imm(-diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC,
                            rotl(-diff, rot) | (rot << 7));
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}
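
/*
 * Worked example (illustrative): arg = 0x00ff00ff has no single rotated
 * encoding and ctpop32(arg) == 16, so opc stays ARITH_MOV; sh1 = 0 covers
 * the low byte and sh2 = 16 the high one, producing the two-insn sequence
 * "mov rd, #0xff" then "eor rd, rd, #0xff0000".
 */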

static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rI" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        tcg_debug_assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIK" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = ~rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = -rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
        if (rd == rm) {
            /* rd == rn == rm; copy an input to tmp first.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
        } else {
            rn = rm;
            rm = rd;
        }
    }
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

/* swap the two low bytes assuming that the two high input bytes and the
   two high output bytes can hold any value. */
static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
    }
}
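
/*
 * For reference, the pre-v6 fallback above is the classic four-insn byte
 * swap (illustrative expansion):
 *     eor  tmp, rn, rn, ror #16   @ tmp = rn ^ ror(rn, 16)
 *     bic  tmp, tmp, #0x00ff0000  @ clear the byte that must not move
 *     mov  rd, rn, ror #8         @ rotate the whole word by one byte
 *     eor  rd, rd, tmp, lsr #8    @ fix up the two middle bytes
 */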

static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}

static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd,
                                    TCGArg a1, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}

static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}

/* The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b(s, cond, disp);
        return;
    }
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range */
static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (addri & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5t_instructions) {
                tcg_abort();
            }
            tcg_out_blx_imm(s, disp);
        } else {
            tcg_out_bl(s, COND_AL, disp);
        }
    } else if (use_armv7_instructions) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
    } else {
        /* ??? Know that movi_pool emits exactly 1 insn.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0);
        tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
    }
}
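
/*
 * A note on the pre-v7 fallback above (illustrative): since the pc reads
 * as the current insn plus 8, "add lr, pc, #0" sets lr to the address of
 * the insn following the single "ldr pc, [pc, #imm]" that movi_pool emits,
 * which is exactly the return address a BL would have produced.
 */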

static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b(s, cond, 0);
    }
}

static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else if (use_armv6_instructions) {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}
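
/*
 * Example (illustrative): for a 64-bit LTU, "cmp ah, bh" followed by
 * "cmpeq al, bl" keeps the flags of the high-part compare unless the high
 * parts were equal, in which case the low-part compare decides; a single
 * COND_CC test then reflects the full 64-bit unsigned comparison.
 */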

#ifdef CONFIG_SOFTMMU
#include "tcg-ldst.inc.c"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,

    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LESL] = helper_le_ldul_mmu,

    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BESL] = helper_be_ldul_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argregs 0..3 are real registers, and 4 and up are stack slots.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )
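
/*
 * For reference (illustrative), the first instantiation above expands to
 * roughly:
 *
 *     static TCGReg tcg_out_arg_imm32(TCGContext *s, TCGReg argreg,
 *                                     uint32_t arg);
 *
 * which emits a movi32 into r0-r3 while argreg < 4, and otherwise
 * materializes the constant in TCG_REG_TMP and stores it to the outgoing
 * stack slot at (argreg - 4) * 4.
 */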

static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    if (use_armv6_instructions && argreg >= 4
        && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}
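
/*
 * Example (illustrative): when marshalling (env, addr) for a 64-bit guest
 * address, env lands in r0 and argreg becomes 1; the alignment bump above
 * skips r1, so the address pair lands in r2:r3, matching the even/odd
 * pairing rule described in the comment.
 */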

#define TLB_SHIFT	(CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);

/* These offsets are built into the LDRD below.  */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);

/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP.  */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               MemOp opc, int mem_index, bool is_load)
{
    int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);

    /*
     * We don't support inline unaligned accesses, but we can easily
     * support overalignment checks.
     */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }

    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}.  */
    if (use_armv6_instructions) {
        tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
    } else {
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off);
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off);
    }

    /* Extract the tlb index from the address into R0.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                    SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));

    /*
     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
     */
    if (cmp_off == 0) {
        if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        } else {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        }
    } else {
        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
        if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        } else {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        }
    }
    if (!use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4);
    }

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                    offsetof(CPUTLBEntry, addend));

    /*
     * Check alignment, check comparators.
     * Do this in no more than 3 insns.  Use MOVW for v7, if possible,
     * to reduce the number of sequential conditional instructions.
     * Almost all guests have at least 4k pages, which means that we need
     * to clear at least 9 bits even for an 8-byte memory access, which
     * means it isn't worth checking for an immediate operand for BIC.
     */
    if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
        tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1));

        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask);
        tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                        addrlo, TCG_REG_TMP, 0);
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
    } else {
        if (a_bits) {
            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo,
                            (1 << a_bits) - 1);
        }
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo,
                        SHIFT_IMM_LSR(TARGET_PAGE_BITS));
        tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP,
                        0, TCG_REG_R2, TCG_REG_TMP,
                        SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    }

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
    }

    return TCG_REG_R1;
}
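
/*
 * For orientation (an illustrative sketch, assuming a v7 softmmu host,
 * a 32-bit guest address and a load, i.e. cmp_off == 0), the fast path
 * emitted above is roughly:
 *
 *     ldrd  r0, r1, [env, #fast_off]   @ r0 = mask, r1 = table
 *     and   r0, r0, addrlo, lsr #n     @ n = PAGE_BITS - TLB_ENTRY_BITS
 *     ldr   r2, [r1, r0]!              @ comparator; r1 = &entry
 *     ldr   r1, [r1, #addend_off]      @ fast-path addend
 *     movw/movt tmp, #mask             @ mask = ~(PAGE_MASK | align bits)
 *     bic   tmp, addrlo, tmp           @ keep page number + align bits
 *     cmp   r2, tmp                    @ flags for the slow-path branch
 */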
1326 
1327 /* Record the context of a call to the out of line helper code for the slow
1328    path for a load or store, so that we can later generate the correct
1329    helper code.  */
add_qemu_ldst_label(TCGContext * s,bool is_ld,TCGMemOpIdx oi,TCGReg datalo,TCGReg datahi,TCGReg addrlo,TCGReg addrhi,tcg_insn_unit * raddr,tcg_insn_unit * label_ptr)1330 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1331                                 TCGReg datalo, TCGReg datahi, TCGReg addrlo,
1332                                 TCGReg addrhi, tcg_insn_unit *raddr,
1333                                 tcg_insn_unit *label_ptr)
1334 {
1335     TCGLabelQemuLdst *label = new_ldst_label(s);
1336 
1337     label->is_ld = is_ld;
1338     label->oi = oi;
1339     label->datalo_reg = datalo;
1340     label->datahi_reg = datahi;
1341     label->addrlo_reg = addrlo;
1342     label->addrhi_reg = addrhi;
1343     label->raddr = raddr;
1344     label->label_ptr[0] = label_ptr;
1345 }
1346 
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    void *func;

    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
        return false;
    }

    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* For armv6 we can use the canonical unsigned helpers and minimize
       icache usage.  For pre-armv6, use the signed helpers since we do
       not have a single insn sign-extend.  */
    if (use_armv6_instructions) {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)];
        if (opc & MO_SIGN) {
            opc = MO_UL;
        }
    }
    tcg_out_call(s, func);

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    default:
        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_Q:
        if (datalo != TCG_REG_R1) {
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
        } else if (datahi != TCG_REG_R0) {
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        } else {
            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
        }
        break;
    }

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}

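/* Generate the slow path for a guest store: marshal the CPU env
   pointer, guest address, data value(s), memop index, and return
   address into the argument registers, then tail-call the store
   helper, which returns straight to the fast path.  Returns false if
   the forward branch at label_ptr[0] cannot be relocated.  */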
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
        return false;
    }

    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SIZE) {
    case MO_8:
        argreg = tcg_out_arg_reg8(s, argreg, datalo);
        break;
    case MO_16:
        argreg = tcg_out_arg_reg16(s, argreg, datalo);
        break;
    case MO_32:
    default:
        argreg = tcg_out_arg_reg32(s, argreg, datalo);
        break;
    case MO_64:
        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    return true;
}
#endif /* SOFTMMU */

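/* Emit the fast-path load for a guest access whose address is the sum
   of addrlo and a register addend (the TLB result for softmmu, or
   guest_base for user-only).  Byte-swapped accesses are loaded raw and
   swapped in place; 64-bit accesses use ldrd when it is safe (softmmu,
   ARMv6+, and an even/odd destination pair), and two 32-bit loads
   otherwise.  */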
static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_SB:
        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UW:
        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend);
            } else if (dl != addend) {
                tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo);
                tcg_out_ld32_12(s, COND_AL, dh, addend, 4);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
                                addend, addrlo, SHIFT_IMM_LSL(0));
                tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0);
                tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}

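/* As tcg_out_qemu_ld_index, but addressing directly off addrlo with
   immediate offsets; used for user-only emulation when guest_base
   is zero.  */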
static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_SB:
        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UW:
        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0);
            } else if (dl == addrlo) {
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
            } else {
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}

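/* Emit a complete qemu_ld opcode: unpack the operands, and for softmmu
   perform the TLB check, emit the fast-path load, and queue the
   slow-path fixup; for user-only emulation, offset by guest_base when
   it is set.  */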
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    MemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);

    /* This is a conditional BL only to load a pointer within this opcode
       into LR for the slow path.  We will not be using the value for a
       tail call.  */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);

    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}

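/* Store counterpart of tcg_out_qemu_ld_index.  The whole sequence may
   be predicated on cond (the softmmu fast path passes COND_EQ);
   byte-swapped values are staged through TCG_REG_R0 as a scratch
   register.  */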
static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, cond, TCG_REG_R0, datalo);
            tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st16_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st32_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datahi);
            tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo);
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
        } else {
            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
            tcg_out_st32_12(s, cond, datahi, addend, 4);
        }
        break;
    }
}

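/* As tcg_out_qemu_st_index, but addressing directly off addrlo with
   immediate offsets; used for user-only emulation when guest_base
   is zero.  */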
static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    MemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
        }
        break;
    }
}

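/* Emit a complete qemu_st opcode.  For softmmu the store itself is
   predicated on the TLB comparison, so the conditional call to the
   slow path must be emitted after it; see the comment below.  */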
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    MemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);

    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);

    /* The conditional call must come last, as we're going to return here.  */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
                              datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}

static tcg_insn_unit *tb_ret_addr;

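/* Emit host code for one TCG opcode.  Operands are in args[], with
   const_args[] flagging which of them are compile-time constants.  */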
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    TCGArg a0, a1, a2, a3, a4, a5;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr.  */
        a0 = args[0];
        if (a0 == 0) {
            tcg_out_goto(s, COND_AL, s->code_gen_epilogue);
        } else {
            tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
            tcg_out_goto(s, COND_AL, tb_ret_addr);
        }
        break;
    case INDEX_op_goto_tb:
        {
            /* Indirect jump method */
            intptr_t ptr, dif, dil;
            TCGReg base = TCG_REG_PC;

            tcg_debug_assert(s->tb_jmp_insn_offset == 0);
            ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]);
            dif = ptr - ((intptr_t)s->code_ptr + 8);
            dil = sextract32(dif, 0, 12);
            if (dif != dil) {
                /* The TB is close, but outside the 12 bits addressable by
                   the load.  We can extend this to 20 bits with a sub of a
                   shifted immediate from pc.  In the vastly unlikely event
                   the code requires more than 1MB, we'll use 2 insns and
                   be no worse off.  */
                base = TCG_REG_R0;
                tcg_out_movi32(s, COND_AL, base, ptr - dil);
            }
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
            set_jmp_reset_offset(s, args[0]);
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
        break;
    case INDEX_op_add_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
    case INDEX_op_and_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through.  */
    gen_arith:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;

    case INDEX_op_ctz_i32:
        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
        a1 = TCG_REG_TMP;
        goto do_clz;

    case INDEX_op_clz_i32:
        a1 = args[1];
    do_clz:
        a0 = args[0];
        a2 = args[2];
        c = const_args[2];
        if (c && a2 == 32) {
            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
            break;
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
        if (c || a0 != a2) {
            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
                           arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_brcond2_i32:
        c = tcg_out_cmp2(s, args, const_args);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
        break;
    case INDEX_op_setcond2_i32:
        c = tcg_out_cmp2(s, args + 1, const_args + 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, 1);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_extract_i32:
        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_sextract_i32:
        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_extract2_i32:
        /* ??? These optimizations vs zero should be generic.  */
        /* ??? But we can't substitute 2 for 1 in the opcode stream yet.  */
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                                args[2], SHIFT_IMM_LSL(32 - args[3]));
            }
        } else if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                            args[1], SHIFT_IMM_LSR(args[3]));
        } else {
            /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
                            args[2], SHIFT_IMM_LSL(32 - args[3]));
            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
                            args[1], SHIFT_IMM_LSR(args[3]));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}

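/* Return the operand constraints for each opcode: "r" is any core
   register, "l" and "s" are the reduced register sets used for
   qemu_ld/qemu_st operands, "0" ties an input to output 0, and the
   I/K/N/Z letters accept an ARM data-processing immediate, an
   immediate whose bitwise inverse is encodable, one whose negation is
   encodable, or the constant zero, respectively.  */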
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef s_s = { .args_ct_str = { "s", "s" } };
    static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } };
    static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
    static const TCGTargetOpDef r_r_l = { .args_ct_str = { "r", "r", "l" } };
    static const TCGTargetOpDef r_l_l = { .args_ct_str = { "r", "l", "l" } };
    static const TCGTargetOpDef s_s_s = { .args_ct_str = { "s", "s", "s" } };
    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
    static const TCGTargetOpDef r_r_rIN
        = { .args_ct_str = { "r", "r", "rIN" } };
    static const TCGTargetOpDef r_r_rIK
        = { .args_ct_str = { "r", "r", "rIK" } };
    static const TCGTargetOpDef r_r_r_r
        = { .args_ct_str = { "r", "r", "r", "r" } };
    static const TCGTargetOpDef r_r_l_l
        = { .args_ct_str = { "r", "r", "l", "l" } };
    static const TCGTargetOpDef s_s_s_s
        = { .args_ct_str = { "s", "s", "s", "s" } };
    static const TCGTargetOpDef br
        = { .args_ct_str = { "r", "rIN" } };
    static const TCGTargetOpDef ext2
        = { .args_ct_str = { "r", "rZ", "rZ" } };
    static const TCGTargetOpDef dep
        = { .args_ct_str = { "r", "0", "rZ" } };
    static const TCGTargetOpDef movc
        = { .args_ct_str = { "r", "r", "rIN", "rIK", "0" } };
    static const TCGTargetOpDef add2
        = { .args_ct_str = { "r", "r", "r", "r", "rIN", "rIK" } };
    static const TCGTargetOpDef sub2
        = { .args_ct_str = { "r", "r", "rI", "rI", "rIN", "rIK" } };
    static const TCGTargetOpDef br2
        = { .args_ct_str = { "r", "r", "rI", "rI" } };
    static const TCGTargetOpDef setc2
        = { .args_ct_str = { "r", "r", "r", "rI", "rI" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16u_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_sextract_i32:
        return &r_r;

    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return &r_r_rIN;
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
        return &r_r_rIK;
    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return &r_r_r;
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return &r_r_r_r;
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        return &r_r_rI;
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
        return &br;
    case INDEX_op_deposit_i32:
        return &dep;
    case INDEX_op_extract2_i32:
        return &ext2;
    case INDEX_op_movcond_i32:
        return &movc;
    case INDEX_op_add2_i32:
        return &add2;
    case INDEX_op_sub2_i32:
        return &sub2;
    case INDEX_op_brcond2_i32:
        return &br2;
    case INDEX_op_setcond2_i32:
        return &setc2;

    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS == 32 ? &r_l : &r_l_l;
    case INDEX_op_qemu_ld_i64:
        return TARGET_LONG_BITS == 32 ? &r_r_l : &r_r_l_l;
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS == 32 ? &s_s : &s_s_s;
    case INDEX_op_qemu_st_i64:
        return TARGET_LONG_BITS == 32 ? &s_s_s : &s_s_s_s;

    default:
        return NULL;
    }
}

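/* One-time backend initialization: probe the host CPU where the
   compile-time configuration left it undecided, then declare which
   registers are available, call-clobbered, and reserved.  */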
static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time.  */
#ifndef use_idiv_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
    }
#endif
    if (__ARM_ARCH < 7) {
        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
            arm_arch = pl[1] - '0';
        }
    }

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
}

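/* Thin wrappers used by the register allocator: everything is a
   32-bit load, store, or move.  tcg_out_sti returns false because
   there is no store-immediate form; the common code will load the
   constant into a register first.  */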
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

static inline bool tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_mov_reg(s, COND_AL, ret, arg);
    return true;
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}

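/* Fill a span of the code buffer with NOP instructions.  */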
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = INSN_NOP;
    }
}

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)

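/* Emit the prologue/epilogue that wraps all translated code: save the
   callee-saved registers and lr, carve out the stack frame computed
   above, load AREG0 from the first C argument, and branch to the TB
   address passed in the second.  The epilogue unwinds the frame and
   returns to the caller with the exit value in r0.  */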
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int stack_addend;

    /* Calling convention requires us to save r4-r11 and lr.  */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);

    /* Reserve callee argument and tcg temp space.  */
    stack_addend = FRAME_SIZE - PUSH_SIZE;

    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
}

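/* DWARF unwind info matching the prologue above, registered via
   tcg_register_jit below so that host debuggers and profilers can
   unwind through generated code.  */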
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue.  */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}