/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode.  */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif
#define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 32)
#define ALL_QLDST_REGS       (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1
#define USE_REG_TB  (sizeof(void *) > 4)

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)
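
/*
 * Worked example of the field macros (exposition only, not used below):
 * "add %o1, %o2, %o0" is a format-3 instruction with op = 2, op3 = 0x00,
 * rd = %o0 (8), rs1 = %o1 (9), rs2 = %o2 (10), so
 *
 *   ARITH_ADD | INSN_RD(8) | INSN_RS1(9) | INSN_RS2(10)
 *     = (2 << 30) | (8 << 25) | (9 << 14) | 10
 *     = 0x9002400a
 *
 * Using INSN_IMM13() instead of INSN_RS2() sets bit 13, which selects
 * the immediate form; that is how tcg_out_arithi() below builds insns.
 */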

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)
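
/*
 * NOP is the canonical SPARC nop: "sethi 0, %g0", a SETHI of a zero
 * immediate into the always-zero register, encoding as 0x01000000.
 */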

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}
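
/*
 * Example: check_fit_i32(val, 13) accepts exactly the range of a signed
 * 13-bit immediate, -4096..4095.  sextract32 re-sign-extends the low 13
 * bits, so 4095 round-trips (fits) while 4096 becomes -4096 (does not).
 */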

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (check_fit_i32(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        /* A 32-bit constant zero-extended to 64 bits.  */
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}
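
/*
 * Example of the sethi/or decomposition above, for arg = 0x12345678:
 *   sethi %hi(0x12345678), ret   ! ret = 0x12345400 (bits 31..10)
 *   or    ret, 0x278, ret        ! ret = 0x12345678 (low 10 bits)
 * The OR is omitted when the low 10 bits are zero.
 */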

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_imm32(s, ret, arg);
        return;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue && USE_REG_TB) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 32-bit constant, shifted.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue && USE_REG_TB) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_movi_imm32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}
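
/*
 * Example of the final two-piece path, for arg = 0x123456789abcdef0:
 * lo = (int32_t)arg = 0x9abcdef0 does not fit in 13 bits, so we emit
 *   movi_imm32 ret, 0x12345678      ! hi = arg >> 32
 *   movi_imm32 scratch, 0x9abcdef0  ! sethi/or zero-extends to 64 bits
 *   sllx ret, 32, ret
 *   or   ret, scratch, ret
 * When lo does fit in 13 bits, hi = (arg - lo) >> 32 pre-compensates
 * for the sign extension introduced by the final ADD of lo.
 */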

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T2);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
{
    intptr_t diff = tcg_tbrel_diff(s, arg);
    if (USE_REG_TB && check_fit_ptr(diff, 13)) {
        tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
        return;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
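
/*
 * SPARC's 32-bit UDIV/SDIV divide a 64-bit dividend formed from the Y
 * register (high 32 bits) and the low 32 bits of rs1, which is why
 * tcg_out_div32() above first loads Y with zeroes (unsigned) or with a
 * copy of the sign bit of rs1 via "sra rs1, 31" (signed).
 */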

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}
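
/*
 * Illustration of the carry trick above: "subcc c1, c2, %g0" sets the
 * icc carry bit exactly when c1 < c2 unsigned.  For LTU,
 * "addc %g0, 0, ret" then computes 0 + 0 + C = C; for GEU,
 * "subc %g0, -1, ret" computes 0 - (-1) - C = 1 - C.
 */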

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}
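
/*
 * Worked example of the carry-adjust path above, for an ADD with a
 * register BH: after "addcc al, bl, tmp", the xcc carry C is the carry
 * out of the low part.  We form T2 = bh + 1, conditionally replace T2
 * with the original bh when the carry is clear (GEU), and finish with
 * "add ah, t2, rh" -- i.e. rh = ah + bh + C without needing ADDXC.
 */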

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    /* Be careful not to clobber %o7 for a tail call. */
    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue,
                     tail_call ? TCG_REG_G2 : TCG_REG_O7);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}

#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];

static void emit_extend(TCGContext *s, TCGReg r, int op)
{
    /* Emit zero extend of 8, 16 or 32 bit data as
     * required by the MO_* value op; do nothing for 64 bit.
     */
    switch (op & MO_SIZE) {
    case MO_8:
        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
        break;
    case MO_16:
        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
        break;
    case MO_32:
        tcg_out_arith(s, r, r, 0, SHIFT_SRL);
        break;
    case MO_64:
        break;
    }
}
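
/*
 * Example: for MO_16 the pair "sll r, 16" then "srl r, 16" clears bits
 * 63..16, because SRL shifts the low 32 bits of its operand and
 * zero-extends the result.  Likewise MO_32's single "srl r, %g0" (a
 * shift by zero) just zero-extends the low 32 bits.
 */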

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEUQ] = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEUQ] = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEUQ] = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEUQ] = helper_be_stq_mmu,
    };

    int i;

    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }

    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        emit_extend(s, TCG_REG_O2, i);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);

        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#else
static const tcg_insn_unit *qemu_unalign_ld_trampoline;
static const tcg_insn_unit *qemu_unalign_st_trampoline;

static void build_trampolines(TCGContext *s)
{
    for (int ld = 0; ld < 2; ++ld) {
        void *helper;

        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }

        if (ld) {
            helper = helper_unaligned_ld;
            qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        } else {
            helper = helper_unaligned_st;
            qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        }

        /* Tail call.  */
        tcg_out_jmpl_const(s, helper, true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    if (USE_REG_TB) {
        QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
    }

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

    build_trampolines(s);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2.  */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
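
/*
 * Shape of the lookup computed above (a sketch; the actual constants
 * depend on the target): with 8 KiB pages (TARGET_PAGE_BITS = 13) and
 * 32-byte CPUTLBEntry slots (CPU_TLB_ENTRY_BITS = 5), the
 * "srl addr, 8" converts the page index directly into a byte offset,
 * the AND with the resizable mask clamps it to the current table size,
 * and the ADD yields r2 = &tlb_table[mmu_idx][(addr >> 13) % n_entries].
 */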
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = LDUB,
    [MO_SB]   = LDSB,
    [MO_UB | MO_LE] = LDUB,
    [MO_SB | MO_LE] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEUQ] = LDX,
    [MO_BESQ] = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEUQ] = LDX_LE,
    [MO_LESQ] = LDX_LE,
};

static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEUQ] = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEUQ] = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, bool is_64)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss.  */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);

    /* We let the helper sign-extend SB and SW, but leave SL for here.  */
    if (is_64 && (memop & MO_SSIZE) == MO_SL) {
        tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight load in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: load by pieces of minimum alignment. */
        int ld_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            ld_opc = qemu_ld_opc[a_bits | MO_BE];
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                /* Shift by the piece size in bits, not bytes.  */
                tcg_out_arithi(s, data, data, a_size * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else if (a_bits == 0) {
            ld_opc = LDUB;
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = LDSB;
                }
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else {
            ld_opc = qemu_ld_opc[a_bits | MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
                }
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}
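
/*
 * Underalignment example for the big-endian load path above: a 4-byte
 * BE load known to be 2-byte aligned (a_bits = 1, s_bits = 2) becomes
 *   lduh [t1 + 0], data
 *   lduh [t1 + 2], t2
 *   sllx data, 16, data
 *   or   data, t2, data
 * assembling the word from two naturally-aligned halfword loads.
 */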

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss.  */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight store in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: store by pieces of minimum alignment. */
        int st_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            st_opc = qemu_st_opc[a_bits | MO_BE];
            for (i = 0; i < s_size; i += a_size) {
                TCGReg d = data;
                int shift = (s_size - a_size - i) * 8;
                if (shift) {
                    d = TCG_REG_T2;
                    tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
                }
                tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
            }
        } else if (a_bits == 0) {
            tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
            for (i = 1; i < s_size; i++) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
            }
        } else {
            /* Note that ST*A with immediate asi must use indexed address. */
            st_opc = qemu_st_opc[a_bits + MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}
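
/*
 * The matching store-side example: a 4-byte BE store with 2-byte
 * alignment (a_bits = 1, s_bits = 2) becomes
 *   srlx data, 16, t2
 *   sth  t2, [t1 + 0]
 *   sth  data, [t1 + 2]
 * writing the most-significant piece first from a shifted copy.
 */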
1430
1431static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1432                       const TCGArg args[TCG_MAX_OP_ARGS],
1433                       const int const_args[TCG_MAX_OP_ARGS])
1434{
1435    TCGArg a0, a1, a2;
1436    int c, c2;
1437
1438    /* Hoist the loads of the most common arguments.  */
1439    a0 = args[0];
1440    a1 = args[1];
1441    a2 = args[2];
1442    c2 = const_args[2];
1443
1444    switch (opc) {
1445    case INDEX_op_exit_tb:
1446        if (check_fit_ptr(a0, 13)) {
1447            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1448            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
1449            break;
1450        } else if (USE_REG_TB) {
1451            intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
1452            if (check_fit_ptr(tb_diff, 13)) {
1453                tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1454                /* Note that TCG_REG_TB has been unwound to O1.  */
1455                tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
1456                break;
1457            }
1458        }
1459        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
1460        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1461        tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
1462        break;
1463    case INDEX_op_goto_tb:
1464        if (s->tb_jmp_insn_offset) {
1465            /* direct jump method */
1466            if (USE_REG_TB) {
1467                /* make sure the patch is 8-byte aligned.  */
1468                if ((intptr_t)s->code_ptr & 4) {
1469                    tcg_out_nop(s);
1470                }
1471                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1472                tcg_out_sethi(s, TCG_REG_T1, 0);
1473                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
1474                tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
1475                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
1476            } else {
1477                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1478                tcg_out32(s, CALL);
1479                tcg_out_nop(s);
1480            }
1481        } else {
1482            /* indirect jump method */
1483            tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
1484            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
1485            tcg_out_nop(s);
1486        }
1487        set_jmp_reset_offset(s, a0);
1488
1489        /* For the unlinked path of goto_tb, we need to reset
1490           TCG_REG_TB to the beginning of this TB.  */
1491        if (USE_REG_TB) {
1492            c = -tcg_current_code_size(s);
1493            if (check_fit_i32(c, 13)) {
1494                tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
1495            } else {
1496                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
1497                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
1498                              TCG_REG_T1, ARITH_ADD);
1499            }
1500        }
1501        break;
1502    case INDEX_op_goto_ptr:
1503        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
1504        if (USE_REG_TB) {
1505            tcg_out_mov_delay(s, TCG_REG_TB, a0);
1506        } else {
1507            tcg_out_nop(s);
1508        }
1509        break;
1510    case INDEX_op_br:
1511        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
1512        tcg_out_nop(s);
1513        break;
1514
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result. */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
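        /* The second output takes the high 32 bits of the product.  */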
        tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        break;

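    /* For the qemu_ld/st ops, a2 carries the memory-op index; the final
       qemu_ld argument selects a 32-bit (false) or 64-bit (true) data
       value.  */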
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
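        /* sra with a zero shift count sign-extends the low 32 bits.  */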
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
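        /* srl with a zero shift count zero-extends the low 32 bits.  */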
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

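    /* Shared tail for binary ops: a0 = a1 op a2, with a2 possibly an
       immediate.  */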
    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

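    /* Shared tail for unary ops, synthesized against %g0: neg is
       "sub %g0, a1" and not is "orn %g0, a1".  */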
    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
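    /*
     * Constraint letters (see tcg-target-con-str.h): 'r' is any general
     * register; 'Z' accepts the constant zero, supplied as %g0; 'J' is
     * a signed 13-bit immediate and 'I' a signed 11-bit immediate; 's'
     * is a register usable for qemu_ld/st, which for softmmu must avoid
     * the helper's first argument registers.
     */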
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rJ);
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, s);
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(sZ, s);

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

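    /* Per the SPARC64 calling convention, the %g and %o registers are
       call-clobbered, while %l and %i survive in the callee's register
       window.  */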
    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
}

#define ELF_HOST_MACHINE  EM_SPARCV9

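/* Minimal DWARF CFI for the generated code, registered with the host
   debugger via tcg_register_jit below.  */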
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
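        /* The CFA offset 2047, as a two-byte ULEB128 constant.  */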
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}

void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
                              uintptr_t jmp_rw, uintptr_t addr)
{
    intptr_t tb_disp = addr - tc_ptr;
    intptr_t br_disp = addr - jmp_rx;
    tcg_insn_unit i1, i2;

    /* We can reach the entire address space for ILP32.
       For LP64, the code_gen_buffer can't be larger than 2GB.  */
    tcg_debug_assert(tb_disp == (int32_t)tb_disp);
    tcg_debug_assert(br_disp == (int32_t)br_disp);

    if (!USE_REG_TB) {
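        /* Patch the jump as a single CALL insn, whose 30-bit field
           holds a word (4-byte) displacement.  */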
        qatomic_set((uint32_t *)jmp_rw,
                    deposit32(CALL, 0, 30, br_disp >> 2));
        flush_idcache_range(jmp_rx, jmp_rw, 4);
        return;
    }

    /* The ba,pt form below does not exercise the full range of the
       branch, but we still need to load the new value of TCG_REG_TB,
       and this small-displacement case does happen quite often.  */
    if (check_fit_ptr(tb_disp, 13)) {
        /* ba,pt %icc, addr */
        i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
              | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
        i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
              | INSN_IMM13(tb_disp));
    } else if (tb_disp >= 0) {
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13(tb_disp & 0x3ff));
    } else {
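        /* Negative displacement: sethi loads the complemented high bits,
           then the sign-extended xor immediate supplies the low 10 bits
           and flips the high bits back to their true negative value.  */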
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
    }

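    /* Update both insns with a single atomic 64-bit store; the host is
       big-endian, so i1 becomes the first insn in memory.  */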
    qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
    flush_idcache_range(jmp_rx, jmp_rw, 8);
}