/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
 *
 * Based on tcg/riscv/tcg-target.c.inc
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include <asm/hwcap.h>

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "tp",
    "sp",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "t0",
    "t1",
    "t2",
    "t3",
    "t4",
    "t5",
    "t6",
    "t7",
    "t8",
    "r21", /* reserved in the LP64* ABI, hence no ABI name */
    "s9",
    "s0",
    "s1",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "vr0",
    "vr1",
    "vr2",
    "vr3",
    "vr4",
    "vr5",
    "vr6",
    "vr7",
    "vr8",
    "vr9",
    "vr10",
    "vr11",
    "vr12",
    "vr13",
    "vr14",
    "vr15",
    "vr16",
    "vr17",
    "vr18",
    "vr19",
    "vr20",
    "vr21",
    "vr22",
    "vr23",
    "vr24",
    "vr25",
    "vr26",
    "vr27",
    "vr28",
    "vr29",
    "vr30",
    "vr31",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Registers preserved across calls */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,

    /* Registers (potentially) clobbered across calls */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,
    TCG_REG_T7,
    TCG_REG_T8,

    /* Argument registers, opposite order of allocation.  */
    TCG_REG_A7,
    TCG_REG_A6,
    TCG_REG_A5,
    TCG_REG_A4,
    TCG_REG_A3,
    TCG_REG_A2,
    TCG_REG_A1,
    TCG_REG_A0,

    /* Vector registers */
    TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
    TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
    TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
    TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
    TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
    TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
    /* V24 - V31 are caller-saved, and skipped.  */
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#define TCG_GUEST_BASE_REG TCG_REG_S1

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_S32   0x400
#define TCG_CT_CONST_U12   0x800
#define TCG_CT_CONST_C12   0x1000
#define TCG_CT_CONST_WSZ   0x2000
#define TCG_CT_CONST_VCMP  0x4000
#define TCG_CT_CONST_VADD  0x8000

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)

static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    return sextract64(val, pos, len);
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, int ct,
                                   TCGType type, TCGCond cond, int vece)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return true;
    }
    int64_t vec_val = sextract64(val, 0, 8 << vece);
    if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
        return true;
    }
    if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
        return true;
    }
    return false;
}
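
/*
 * Worked example (illustrative): TCG_CT_CONST_C12 accepts a constant whose
 * bitwise complement fits in uimm12.  E.g. val = -256 matches, since
 * ~val == 0xff; this lets tcg_out_op() emit andc with such a constant as a
 * single "andi rd, rs, 0xff" (rs & 0xff == rs & ~(-256)).
 */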

/*
 * Relocations
 */

/*
 * The relocation records defined in the LoongArch ELF psABI v1.00 are way
 * too complicated: a whopping stack machine is needed to stuff the fields,
 * and at the very least one SOP_PUSH and one SOP_POP (of the correct
 * format) are needed.
 *
 * Hence, define our own simpler relocation types. Numbers are chosen so as
 * not to collide with potential future additions to the true ELF relocation
 * type enum.
 */

/* Field Sk16, shifted right by 2; suitable for conditional jumps */
#define R_LOONGARCH_BR_SK16     256
/* Field Sd10k16, shifted right by 2; suitable for B and BL */
#define R_LOONGARCH_BR_SD10K16  257

static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 16)) {
        *src_rw = deposit64(*src_rw, 10, 16, offset);
        return true;
    }

    return false;
}

static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
                             const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 26)) {
        *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
        *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */
        return true;
    }

    return false;
}
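
/*
 * Worked example (illustrative): a branch 0x48d14 bytes forward has word
 * offset 0x48d14 >> 2 = 0x12345; reloc_br_sd10k16() stores the low 16 bits
 * (0x2345) in the k16 slot (insn bits 25:10) and the high 10 bits (0x1) in
 * the d10 slot (insn bits 9:0).
 */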

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_LOONGARCH_BR_SK16:
        return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
    case R_LOONGARCH_BR_SD10K16:
        return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#include "tcg-insn-defs.c.inc"

/*
 * TCG intrinsics
 */

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Baseline LoongArch only has the full barrier, unfortunately.  */
    tcg_out_opc_dbar(s, 0);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        if (ret < TCG_REG_V0) {
            if (arg < TCG_REG_V0) {
                /*
                 * Conventional register-register move used in LoongArch is
                 * `or dst, src, zero`.
                 */
                tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
            } else {
                tcg_out_opc_movfr2gr_d(s, ret, arg);
            }
        } else {
            if (arg < TCG_REG_V0) {
                tcg_out_opc_movgr2fr_d(s, ret, arg);
            } else {
                tcg_out_opc_fmov_d(s, ret, arg);
            }
        }
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_out_opc_vori_b(s, ret, arg, 0);
        break;
    case TCG_TYPE_V256:
        tcg_out_opc_xvori_b(s, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

/* Loads a 32-bit immediate into rd, sign-extended.  */
static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
{
    tcg_target_long lo = sextreg(val, 0, 12);
    tcg_target_long hi12 = sextreg(val, 12, 20);

    /* Single-instruction cases.  */
    if (hi12 == 0) {
        /* val fits in uimm12: ori rd, zero, val */
        tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
        return;
    }
    if (hi12 == sextreg(lo, 12, 20)) {
        /* val fits in simm12: addi.w rd, zero, val */
        tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
        return;
    }

    /* High bits must be set; load with lu12i.w + optional ori.  */
    tcg_out_opc_lu12i_w(s, rd, hi12);
    if (lo != 0) {
        tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
    }
}
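
/*
 * Worked example (illustrative): val = 0x12345678 has hi12 = 0x12345 and
 * lo = 0x678, so neither single-instruction case applies and we emit
 * "lu12i.w rd, 0x12345" followed by "ori rd, rd, 0x678".  val = -5 instead
 * satisfies hi12 == sextreg(lo, 12, 20) and becomes "addi.w rd, zero, -5".
 */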

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    /*
     * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
     * with dedicated instructions for filling the respective bitfields
     * below:
     *
     *        6                   5                   4               3
     *  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
     * +-----------------------+---------------------------------------+...
     * |          hi52         |                  hi32                 |
     * +-----------------------+---------------------------------------+...
     *       3                   2                   1
     *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------------+-------------------------+
     *    |                 hi12                |            lo           |
     * ...+-------------------------------------+-------------------------+
     *
     * Check whether val belongs to one of the several fast cases before
     * falling back to the slow path.
     */

    intptr_t src_rx, pc_offset;
    tcg_target_long hi12, hi32, hi52;

    /* Value fits in signed i32.  */
    if (type == TCG_TYPE_I32 || val == (int32_t)val) {
        tcg_out_movi_i32(s, rd, val);
        return;
    }

    /* PC-relative cases.  */
    src_rx = (intptr_t)tcg_splitwx_to_rx(s->code_ptr);
    if ((val & 3) == 0) {
        pc_offset = val - src_rx;
        if (pc_offset == sextreg(pc_offset, 0, 22)) {
            /* Single pcaddu2i.  */
            tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
            return;
        }
    }

    pc_offset = (val >> 12) - (src_rx >> 12);
    if (pc_offset == sextreg(pc_offset, 0, 20)) {
        /* Load with pcalau12i + ori.  */
        tcg_target_long val_lo = val & 0xfff;
        tcg_out_opc_pcalau12i(s, rd, pc_offset);
        if (val_lo != 0) {
            tcg_out_opc_ori(s, rd, rd, val_lo);
        }
        return;
    }

    hi12 = sextreg(val, 12, 20);
    hi32 = sextreg(val, 32, 20);
    hi52 = sextreg(val, 52, 12);

    /* Single cu52i.d case.  */
    if ((hi52 != 0) && (ctz64(val) >= 52)) {
        tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
        return;
    }

    /* Slow path.  Initialize the low 32 bits, then concat high bits.  */
    tcg_out_movi_i32(s, rd, val);

    /* Load hi32 and hi52 explicitly when they are unexpected values. */
    if (hi32 != sextreg(hi12, 20, 20)) {
        tcg_out_opc_cu32i_d(s, rd, hi32);
    }

    if (hi52 != sextreg(hi32, 20, 12)) {
        tcg_out_opc_cu52i_d(s, rd, rd, hi52);
    }
}
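
/*
 * Illustrative worst case (assuming no PC-relative case hits): an arbitrary
 * 64-bit constant is built with lu12i.w (bits 31:12), ori (bits 11:0),
 * cu32i.d (bits 51:32) and cu52i.d (bits 63:52), i.e. at most four
 * instructions, matching the bitfield diagram above.
 */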

static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
                         TCGReg rs, tcg_target_long imm)
{
    tcg_target_long lo12 = sextreg(imm, 0, 12);
    tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);

    /*
     * Note that there's a hole in between hi16 and lo12:
     *
     *       3                   2                   1                   0
     *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------+-------+-----------------------+
     *    |             hi16              |       |          lo12         |
     * ...+-------------------------------+-------+-----------------------+
     *
     * For bits within that hole, it's more efficient to use LU12I and ADD.
     */
    if (imm == (hi16 << 16) + lo12) {
        if (hi16) {
            tcg_out_opc_addu16i_d(s, rd, rs, hi16);
            rs = rd;
        }
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_addi_w(s, rd, rs, lo12);
        } else if (lo12) {
            tcg_out_opc_addi_d(s, rd, rs, lo12);
        } else {
            tcg_out_mov(s, type, rd, rs);
        }
    } else {
        tcg_out_movi(s, type, TCG_REG_TMP0, imm);
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
        } else {
            tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
        }
    }
}
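
/*
 * Worked example (illustrative): imm = 0x30345 splits into hi16 = 0x3 and
 * lo12 = 0x345 with nothing in the hole, giving "addu16i.d rd, rs, 3" plus
 * "addi.d rd, rd, 0x345".  imm = 0x2345 has bit 13 set inside the hole, so
 * it takes the tcg_out_movi() + add path instead.
 */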

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_andi(s, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_b(s, ret, arg);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_h(s, ret, arg);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_addi_w(s, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_ext32s(s, ret, arg);
    }
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
                           TCGReg a0, TCGReg a1, TCGReg a2,
                           bool c2, bool is_32bit)
{
    if (c2) {
        /*
         * Fast path: semantics already satisfied due to constraint and
         * insn behavior, single instruction is enough.
         */
        tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
        /* all clz/ctz insns belong to DJ-format */
        tcg_out32(s, encode_dj_insn(opc, a0, a1));
        return;
    }

    tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
    /* a0 = a1 ? TCG_REG_TMP0 : a2 */
    tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
    tcg_out_opc_masknez(s, a0, a2, a1);
    tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
}

#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)

static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT.  Watch out for wrap around for LEU.
         * We don't need to care for this for LE because the constant input
         * is still constrained to int32_t, and INT32_MAX+1 is representable
         * in the 64-bit temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            arg2 += 1;
        } else {
            TCGReg tmp = arg2;
            arg2 = arg1;
            arg1 = tmp;
            cond = tcg_swap_cond(cond);    /* LE -> GE */
            cond = tcg_invert_cond(cond);  /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_xor(s, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else if (arg2 >= 0 && arg2 <= 0xfff) {
            tcg_out_opc_xori(s, ret, arg1, arg2);
        } else {
            tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
        }
        break;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        if (c2) {
            if (arg2 >= -0x800 && arg2 <= 0x7ff) {
                if (cond == TCG_COND_LT) {
                    tcg_out_opc_slti(s, ret, arg1, arg2);
                } else {
                    tcg_out_opc_sltui(s, ret, arg1, arg2);
                }
                break;
            }
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
            arg2 = TCG_REG_TMP0;
        }
        if (cond == TCG_COND_LT) {
            tcg_out_opc_slt(s, ret, arg1, arg2);
        } else {
            tcg_out_opc_sltu(s, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
        break;
    }

    return ret | flags;
}
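
/*
 * Worked example (illustrative): setcond(EQ, ret, arg1, 0) first inverts
 * EQ to NE (flags = SETCOND_INV); the NE-with-zero case then returns
 * arg1 | SETCOND_INV | SETCOND_NEZ without emitting anything, and
 * tcg_out_setcond() below finishes with "sltui ret, arg1, 1",
 * i.e. ret = (arg1 == 0).
 */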

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_xori(s, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_sltui(s, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, tcg_target_long c2, bool const2,
                            TCGReg v1, TCGReg v2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
    TCGReg t;

    /* Standardize the test below to t != 0. */
    if (tmpflags & SETCOND_INV) {
        t = v1, v1 = v2, v2 = t;
    }

    t = tmpflags & ~SETCOND_FLAGS;
    if (v1 == TCG_REG_ZERO) {
        tcg_out_opc_masknez(s, ret, v2, t);
    } else if (v2 == TCG_REG_ZERO) {
        tcg_out_opc_maskeqz(s, ret, v1, t);
    } else {
        tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
        tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
        tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
    }
}

/*
 * Branch helpers
 */

static const struct {
    LoongArchInsn op;
    bool swap;
} tcg_brcond_to_loongarch[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BGT,  true  },
    [TCG_COND_GE] =  { OPC_BLE,  true  },
    [TCG_COND_LE] =  { OPC_BLE,  false },
    [TCG_COND_GT] =  { OPC_BGT,  false },
    [TCG_COND_LTU] = { OPC_BGTU, true  },
    [TCG_COND_GEU] = { OPC_BLEU, true  },
    [TCG_COND_LEU] = { OPC_BLEU, false },
    [TCG_COND_GTU] = { OPC_BGTU, false }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_loongarch[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    /* all conditional branch insns belong to DJSk16-format */
    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
    tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);

    tcg_debug_assert((offset & 3) == 0);
    if (offset == sextreg(offset, 0, 28)) {
        /* short jump: +/- 128MiB (signed 28-bit byte offset) */
        if (tail) {
            tcg_out_opc_b(s, offset >> 2);
        } else {
            tcg_out_opc_bl(s, offset >> 2);
        }
    } else if (offset == sextreg(offset, 0, 38)) {
        /* long jump: +/- 128GiB (signed 38-bit byte offset) */
        tcg_target_long lo = sextreg(offset, 0, 18);
        tcg_target_long hi = offset - lo;
        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    } else {
        /* far jump: 64-bit */
        tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
        tcg_target_long hi = (tcg_target_long)arg - lo;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    }
}
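
/*
 * Illustrative note on the long-jump case: lo = sextreg(offset, 0, 18) and
 * hi = offset - lo reconstruct offset exactly, since pcaddu18i adds hi
 * (its immediate shifted left 18) to pc and jirl adds lo (its immediate
 * shifted left 2); offset is 4-byte aligned, so no bits are lost.
 */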

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

/*
 * Load/store helpers
 */

static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_LD_B:
    case OPC_LD_BU:
    case OPC_LD_H:
    case OPC_LD_HU:
    case OPC_LD_W:
    case OPC_LD_WU:
    case OPC_LD_D:
    case OPC_ST_B:
    case OPC_ST_H:
    case OPC_ST_W:
    case OPC_ST_D:
        tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
        break;
    case OPC_FLD_S:
    case OPC_FLD_D:
    case OPC_FST_S:
    case OPC_FST_D:
        tcg_out32(s, encode_fdjsk12_insn(opc, data, addr, imm12));
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg dest,
                       TCGReg base, intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (dest < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_LD_W, dest, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FLD_S, dest, base, offset);
        }
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        if (dest < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_LD_D, dest, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FLD_D, dest, base, offset);
        }
        break;
    case TCG_TYPE_V128:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_vld(s, dest, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_vldx(s, dest, base, TCG_REG_TMP0);
        }
        break;
    case TCG_TYPE_V256:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_xvld(s, dest, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_xvldx(s, dest, base, TCG_REG_TMP0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src,
                       TCGReg base, intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (src < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_ST_W, src, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FST_S, src, base, offset);
        }
        break;
    case TCG_TYPE_I64:
    case TCG_TYPE_V64:
        if (src < TCG_REG_V0) {
            tcg_out_ldst(s, OPC_ST_D, src, base, offset);
        } else {
            tcg_out_ldst(s, OPC_FST_D, src, base, offset);
        }
        break;
    case TCG_TYPE_V128:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_vst(s, src, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_vstx(s, src, base, TCG_REG_TMP0);
        }
        break;
    case TCG_TYPE_V256:
        if (-0x800 <= offset && offset <= 0x7ff) {
            tcg_out_opc_xvst(s, src, base, offset);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
            tcg_out_opc_xvstx(s, src, base, TCG_REG_TMP0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

/*
 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
 */

static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_b(s, 0);
    return reloc_br_sd10k16(s->code_ptr - 1, target);
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_TMP0 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false);
    tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
    return tcg_out_goto(s, l->raddr);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
    return tcg_out_goto(s, l->raddr);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

/* We expect to use a 12-bit negative offset from ENV.  */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp a_bits;

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_bits = h->aa.align;

    if (tcg_use_softmmu) {
        unsigned s_bits = opc & MO_SIZE;
        int mem_index = get_mmuidx(oi);
        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;

        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

        tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
                           s->page_bits - CPU_TLB_ENTRY_BITS);
        tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
        tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

        /* Load the tlb comparator and the addend.  */
        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
                   is_ld ? offsetof(CPUTLBEntry, addr_read)
                         : offsetof(CPUTLBEntry, addr_write));
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
                   offsetof(CPUTLBEntry, addend));

        /*
         * For aligned accesses, we check the first byte and include the
         * alignment bits within the address.  For unaligned access, we
         * check that we don't cross pages using the address of the last
         * byte of the access.
         */
        if (a_bits < s_bits) {
            unsigned a_mask = (1u << a_bits) - 1;
            unsigned s_mask = (1u << s_bits) - 1;
            tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
        } else {
            tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
        }
        tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
                              a_bits, s->page_bits - 1);

        /* Compare masked address with the TLB entry.  */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);

        h->index = TCG_REG_TMP2;
    } else {
        if (a_bits) {
            ldst = new_ldst_label(s);

            ldst->is_ld = is_ld;
            ldst->oi = oi;
            ldst->addrlo_reg = addr_reg;

            /*
             * Without micro-architecture details, we don't know which of
             * bstrpick or andi is faster, so use bstrpick as it's not
             * constrained by imm field width.  Not that alignments >= 2^12
             * are going to happen any time soon anyway.
             */
            tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);

            ldst->label_ptr[0] = s->code_ptr;
            tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
        }

        h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    }

    if (addr_type == TCG_TYPE_I32) {
        h->base = TCG_REG_TMP0;
        tcg_out_ext32u(s, h->base, addr_reg);
    } else {
        h->base = addr_reg;
    }

    return ldst;
}
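
/*
 * Illustrative sketch of the softmmu fast path built above (commentary,
 * not from the original source): the TLB entry address is
 *   table + ((addr >> (page_bits - CPU_TLB_ENTRY_BITS)) & mask)
 * and its comparator is checked against addr with the in-page bits from
 * a_bits up to page_bits - 1 cleared; the low a_bits bits are kept, so a
 * misaligned access fails the compare and takes the slow path.  On a hit,
 * h->index holds the entry's addend and the host address is addr + addend.
 */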

static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
        break;
    case MO_SB:
        tcg_out_opc_ldx_b(s, rd, h.base, h.index);
        break;
    case MO_UW:
        tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
        break;
    case MO_SW:
        tcg_out_opc_ldx_h(s, rd, h.base, h.index);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
            break;
        }
        /* fallthrough */
    case MO_SL:
        tcg_out_opc_ldx_w(s, rd, h.base, h.index);
        break;
    case MO_UQ:
        tcg_out_opc_ldx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
    tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion.  */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_opc_stx_b(s, rd, h.base, h.index);
        break;
    case MO_16:
        tcg_out_opc_stx_h(s, rd, h.base, h.index);
        break;
    case MO_32:
        tcg_out_opc_stx_w(s, rd, h.base, h.index);
        break;
    case MO_64:
        tcg_out_opc_stx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
    tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);

    if (h.aa.atom == MO_128) {
        /*
         * Use VLDX/VSTX when 128-bit atomicity is required.
         * If the address is 16-byte aligned, the 128-bit load/store is
         * atomic.
         */
        if (is_ld) {
1184            tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
1185            tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
1186            tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
1187        } else {
1188            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
1189            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
1190            tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
1191        }
1192    } else {
1193        /* Otherwise use a pair of LD/ST. */
1194        TCGReg base = h.base;
1195        if (h.index != TCG_REG_ZERO) {
1196            base = TCG_REG_TMP0;
1197            tcg_out_opc_add_d(s, base, h.base, h.index);
1198        }
1199        if (is_ld) {
1200            tcg_debug_assert(base != data_lo);
1201            tcg_out_opc_ld_d(s, data_lo, base, 0);
1202            tcg_out_opc_ld_d(s, data_hi, base, 8);
1203        } else {
1204            tcg_out_opc_st_d(s, data_lo, base, 0);
1205            tcg_out_opc_st_d(s, data_hi, base, 8);
1206        }
1207    }
1208
1209    if (ldst) {
1210        ldst->type = TCG_TYPE_I128;
1211        ldst->datalo_reg = data_lo;
1212        ldst->datahi_reg = data_hi;
1213        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1214    }
1215}
1216
1217/*
1218 * Entry-points
1219 */
1220
1221static const tcg_insn_unit *tb_ret_addr;
1222
1223static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1224{
1225    /* Reuse the zeroing that exists for goto_ptr.  */
1226    if (a0 == 0) {
1227        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
1228    } else {
1229        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
1230        tcg_out_call_int(s, tb_ret_addr, true);
1231    }
1232}
1233
1234static void tcg_out_goto_tb(TCGContext *s, int which)
1235{
1236    /*
1237     * Direct branch, or load indirect address, to be patched
1238     * by tb_target_set_jmp_target.  Check indirect load offset
1239     * in range early, regardless of direct branch distance,
1240     * via assert within tcg_out_opc_pcaddu2i.
1241     */
1242    uintptr_t i_addr = get_jmp_target_addr(s, which);
1243    intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);
1244
1245    set_jmp_insn_offset(s, which);
1246    tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);
1247
1248    /* Finish the load and indirect branch. */
1249    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
1250    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
1251    set_jmp_reset_offset(s, which);
1252}
1253
1254void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1255                              uintptr_t jmp_rx, uintptr_t jmp_rw)
1256{
1257    uintptr_t d_addr = tb->jmp_target_addr[n];
1258    ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
1259    tcg_insn_unit insn;
1260
1261    /* Either directly branch, or load slot address for indirect branch. */
1262    if (d_disp == sextreg(d_disp, 0, 26)) {
1263        insn = encode_sd10k16_insn(OPC_B, d_disp);
1264    } else {
1265        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
1266        intptr_t i_disp = i_addr - jmp_rx;
1267        insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
1268    }
1269
1270    qatomic_set((tcg_insn_unit *)jmp_rw, insn);
1271    flush_idcache_range(jmp_rx, jmp_rw, 4);
1272}
1273
1274static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1275                       const TCGArg args[TCG_MAX_OP_ARGS],
1276                       const int const_args[TCG_MAX_OP_ARGS])
1277{
1278    TCGArg a0 = args[0];
1279    TCGArg a1 = args[1];
1280    TCGArg a2 = args[2];
1281    TCGArg a3 = args[3];
1282    int c2 = const_args[2];
1283
1284    switch (opc) {
1285    case INDEX_op_mb:
1286        tcg_out_mb(s, a0);
1287        break;
1288
1289    case INDEX_op_goto_ptr:
1290        tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
1291        break;
1292
1293    case INDEX_op_br:
1294        tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
1295                      0);
1296        tcg_out_opc_b(s, 0);
1297        break;
1298
1299    case INDEX_op_brcond_i32:
1300    case INDEX_op_brcond_i64:
1301        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
1302        break;
1303
1304    case INDEX_op_extrh_i64_i32:
1305        tcg_out_opc_srai_d(s, a0, a1, 32);
1306        break;
1307
1308    case INDEX_op_not_i32:
1309    case INDEX_op_not_i64:
1310        tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
1311        break;
1312
1313    case INDEX_op_nor_i32:
1314    case INDEX_op_nor_i64:
1315        if (c2) {
1316            tcg_out_opc_ori(s, a0, a1, a2);
1317            tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
1318        } else {
1319            tcg_out_opc_nor(s, a0, a1, a2);
1320        }
1321        break;
1322
1323    case INDEX_op_andc_i32:
1324    case INDEX_op_andc_i64:
1325        if (c2) {
1326            /* guaranteed to fit due to constraint */
1327            tcg_out_opc_andi(s, a0, a1, ~a2);
1328        } else {
1329            tcg_out_opc_andn(s, a0, a1, a2);
1330        }
1331        break;
1332
1333    case INDEX_op_orc_i32:
1334    case INDEX_op_orc_i64:
1335        if (c2) {
1336            /* guaranteed to fit due to constraint */
1337            tcg_out_opc_ori(s, a0, a1, ~a2);
1338        } else {
1339            tcg_out_opc_orn(s, a0, a1, a2);
1340        }
1341        break;
1342
1343    case INDEX_op_and_i32:
1344    case INDEX_op_and_i64:
1345        if (c2) {
1346            tcg_out_opc_andi(s, a0, a1, a2);
1347        } else {
1348            tcg_out_opc_and(s, a0, a1, a2);
1349        }
1350        break;
1351
1352    case INDEX_op_or_i32:
1353    case INDEX_op_or_i64:
1354        if (c2) {
1355            tcg_out_opc_ori(s, a0, a1, a2);
1356        } else {
1357            tcg_out_opc_or(s, a0, a1, a2);
1358        }
1359        break;
1360
1361    case INDEX_op_xor_i32:
1362    case INDEX_op_xor_i64:
1363        if (c2) {
1364            tcg_out_opc_xori(s, a0, a1, a2);
1365        } else {
1366            tcg_out_opc_xor(s, a0, a1, a2);
1367        }
1368        break;
1369
1370    case INDEX_op_extract_i32:
1371        tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
1372        break;
1373    case INDEX_op_extract_i64:
1374        tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
1375        break;
1376
1377    case INDEX_op_deposit_i32:
1378        tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
1379        break;
1380    case INDEX_op_deposit_i64:
1381        tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
1382        break;
1383
1384    case INDEX_op_bswap16_i32:
1385    case INDEX_op_bswap16_i64:
1386        tcg_out_opc_revb_2h(s, a0, a1);
1387        if (a2 & TCG_BSWAP_OS) {
1388            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
1389        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1390            tcg_out_ext16u(s, a0, a0);
1391        }
1392        break;
1393
1394    case INDEX_op_bswap32_i32:
1395        /* All 32-bit values are computed sign-extended in the register.  */
1396        a2 = TCG_BSWAP_OS;
1397        /* fallthrough */
1398    case INDEX_op_bswap32_i64:
1399        tcg_out_opc_revb_2w(s, a0, a1);
1400        if (a2 & TCG_BSWAP_OS) {
1401            tcg_out_ext32s(s, a0, a0);
1402        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1403            tcg_out_ext32u(s, a0, a0);
1404        }
1405        break;
1406
1407    case INDEX_op_bswap64_i64:
1408        tcg_out_opc_revb_d(s, a0, a1);
1409        break;
1410
1411    case INDEX_op_clz_i32:
1412        tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
1413        break;
1414    case INDEX_op_clz_i64:
1415        tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
1416        break;
1417
1418    case INDEX_op_ctz_i32:
1419        tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
1420        break;
1421    case INDEX_op_ctz_i64:
1422        tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
1423        break;
1424
1425    case INDEX_op_shl_i32:
1426        if (c2) {
1427            tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
1428        } else {
1429            tcg_out_opc_sll_w(s, a0, a1, a2);
1430        }
1431        break;
1432    case INDEX_op_shl_i64:
1433        if (c2) {
1434            tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
1435        } else {
1436            tcg_out_opc_sll_d(s, a0, a1, a2);
1437        }
1438        break;
1439
1440    case INDEX_op_shr_i32:
1441        if (c2) {
1442            tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
1443        } else {
1444            tcg_out_opc_srl_w(s, a0, a1, a2);
1445        }
1446        break;
1447    case INDEX_op_shr_i64:
1448        if (c2) {
1449            tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
1450        } else {
1451            tcg_out_opc_srl_d(s, a0, a1, a2);
1452        }
1453        break;
1454
1455    case INDEX_op_sar_i32:
1456        if (c2) {
1457            tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
1458        } else {
1459            tcg_out_opc_sra_w(s, a0, a1, a2);
1460        }
1461        break;
1462    case INDEX_op_sar_i64:
1463        if (c2) {
1464            tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
1465        } else {
1466            tcg_out_opc_sra_d(s, a0, a1, a2);
1467        }
1468        break;
1469
1470    case INDEX_op_rotl_i32:
1471        /* transform into equivalent rotr/rotri */
1472        if (c2) {
1473            tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
1474        } else {
1475            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
1476            tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
1477        }
1478        break;
1479    case INDEX_op_rotl_i64:
1480        /* transform into equivalent rotr/rotri */
1481        if (c2) {
1482            tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
1483        } else {
1484            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
1485            tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
1486        }
1487        break;
1488
1489    case INDEX_op_rotr_i32:
1490        if (c2) {
1491            tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
1492        } else {
1493            tcg_out_opc_rotr_w(s, a0, a1, a2);
1494        }
1495        break;
1496    case INDEX_op_rotr_i64:
1497        if (c2) {
1498            tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
1499        } else {
1500            tcg_out_opc_rotr_d(s, a0, a1, a2);
1501        }
1502        break;
1503
1504    case INDEX_op_add_i32:
1505        if (c2) {
1506            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
1507        } else {
1508            tcg_out_opc_add_w(s, a0, a1, a2);
1509        }
1510        break;
1511    case INDEX_op_add_i64:
1512        if (c2) {
1513            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
1514        } else {
1515            tcg_out_opc_add_d(s, a0, a1, a2);
1516        }
1517        break;
1518
1519    case INDEX_op_sub_i32:
1520        if (c2) {
1521            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
1522        } else {
1523            tcg_out_opc_sub_w(s, a0, a1, a2);
1524        }
1525        break;
1526    case INDEX_op_sub_i64:
1527        if (c2) {
1528            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
1529        } else {
1530            tcg_out_opc_sub_d(s, a0, a1, a2);
1531        }
1532        break;
1533
1534    case INDEX_op_neg_i32:
1535        tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1);
1536        break;
1537    case INDEX_op_neg_i64:
1538        tcg_out_opc_sub_d(s, a0, TCG_REG_ZERO, a1);
1539        break;
1540
1541    case INDEX_op_mul_i32:
1542        tcg_out_opc_mul_w(s, a0, a1, a2);
1543        break;
1544    case INDEX_op_mul_i64:
1545        tcg_out_opc_mul_d(s, a0, a1, a2);
1546        break;
1547
1548    case INDEX_op_mulsh_i32:
1549        tcg_out_opc_mulh_w(s, a0, a1, a2);
1550        break;
1551    case INDEX_op_mulsh_i64:
1552        tcg_out_opc_mulh_d(s, a0, a1, a2);
1553        break;
1554
1555    case INDEX_op_muluh_i32:
1556        tcg_out_opc_mulh_wu(s, a0, a1, a2);
1557        break;
1558    case INDEX_op_muluh_i64:
1559        tcg_out_opc_mulh_du(s, a0, a1, a2);
1560        break;
1561
1562    case INDEX_op_div_i32:
1563        tcg_out_opc_div_w(s, a0, a1, a2);
1564        break;
1565    case INDEX_op_div_i64:
1566        tcg_out_opc_div_d(s, a0, a1, a2);
1567        break;
1568
1569    case INDEX_op_divu_i32:
1570        tcg_out_opc_div_wu(s, a0, a1, a2);
1571        break;
1572    case INDEX_op_divu_i64:
1573        tcg_out_opc_div_du(s, a0, a1, a2);
1574        break;
1575
1576    case INDEX_op_rem_i32:
1577        tcg_out_opc_mod_w(s, a0, a1, a2);
1578        break;
1579    case INDEX_op_rem_i64:
1580        tcg_out_opc_mod_d(s, a0, a1, a2);
1581        break;
1582
1583    case INDEX_op_remu_i32:
1584        tcg_out_opc_mod_wu(s, a0, a1, a2);
1585        break;
1586    case INDEX_op_remu_i64:
1587        tcg_out_opc_mod_du(s, a0, a1, a2);
1588        break;
1589
1590    case INDEX_op_setcond_i32:
1591    case INDEX_op_setcond_i64:
1592        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
1593        break;
1594
1595    case INDEX_op_movcond_i32:
1596    case INDEX_op_movcond_i64:
1597        tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
1598        break;
1599
1600    case INDEX_op_ld8s_i32:
1601    case INDEX_op_ld8s_i64:
1602        tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
1603        break;
1604    case INDEX_op_ld8u_i32:
1605    case INDEX_op_ld8u_i64:
1606        tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
1607        break;
1608    case INDEX_op_ld16s_i32:
1609    case INDEX_op_ld16s_i64:
1610        tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
1611        break;
1612    case INDEX_op_ld16u_i32:
1613    case INDEX_op_ld16u_i64:
1614        tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
1615        break;
1616    case INDEX_op_ld_i32:
1617    case INDEX_op_ld32s_i64:
1618        tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
1619        break;
1620    case INDEX_op_ld32u_i64:
1621        tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
1622        break;
1623    case INDEX_op_ld_i64:
1624        tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
1625        break;
1626
1627    case INDEX_op_st8_i32:
1628    case INDEX_op_st8_i64:
1629        tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
1630        break;
1631    case INDEX_op_st16_i32:
1632    case INDEX_op_st16_i64:
1633        tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
1634        break;
1635    case INDEX_op_st_i32:
1636    case INDEX_op_st32_i64:
1637        tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
1638        break;
1639    case INDEX_op_st_i64:
1640        tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
1641        break;
1642
1643    case INDEX_op_qemu_ld_a32_i32:
1644    case INDEX_op_qemu_ld_a64_i32:
1645        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
1646        break;
1647    case INDEX_op_qemu_ld_a32_i64:
1648    case INDEX_op_qemu_ld_a64_i64:
1649        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
1650        break;
1651    case INDEX_op_qemu_ld_a32_i128:
1652    case INDEX_op_qemu_ld_a64_i128:
1653        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
1654        break;
1655    case INDEX_op_qemu_st_a32_i32:
1656    case INDEX_op_qemu_st_a64_i32:
1657        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
1658        break;
1659    case INDEX_op_qemu_st_a32_i64:
1660    case INDEX_op_qemu_st_a64_i64:
1661        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
1662        break;
1663    case INDEX_op_qemu_st_a32_i128:
1664    case INDEX_op_qemu_st_a64_i128:
1665        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
1666        break;
1667
1668    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
1669    case INDEX_op_mov_i64:
1670    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
1671    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
1672    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
1673    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
1674    case INDEX_op_ext8s_i64:
1675    case INDEX_op_ext8u_i32:
1676    case INDEX_op_ext8u_i64:
1677    case INDEX_op_ext16s_i32:
1678    case INDEX_op_ext16s_i64:
1679    case INDEX_op_ext16u_i32:
1680    case INDEX_op_ext16u_i64:
1681    case INDEX_op_ext32s_i64:
1682    case INDEX_op_ext32u_i64:
1683    case INDEX_op_ext_i32_i64:
1684    case INDEX_op_extu_i32_i64:
1685    case INDEX_op_extrl_i64_i32:
1686    default:
1687        g_assert_not_reached();
1688    }
1689}
1690
1691static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
1692                            TCGReg rd, TCGReg rs)
1693{
1694    static const LoongArchInsn repl_insn[2][4] = {
1695        { OPC_VREPLGR2VR_B, OPC_VREPLGR2VR_H,
1696          OPC_VREPLGR2VR_W, OPC_VREPLGR2VR_D },
1697        { OPC_XVREPLGR2VR_B, OPC_XVREPLGR2VR_H,
1698          OPC_XVREPLGR2VR_W, OPC_XVREPLGR2VR_D },
1699    };
1700    bool lasx = type == TCG_TYPE_V256;
1701
1702    tcg_debug_assert(vece <= MO_64);
1703    tcg_out32(s, encode_vdj_insn(repl_insn[lasx][vece], rd, rs));
1704    return true;
1705}
1706
1707static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
1708                             TCGReg r, TCGReg base, intptr_t offset)
1709{
1710    bool lasx = type == TCG_TYPE_V256;
1711
1712    /* Handle imm overflow and division (vldrepl.d imm is divided by 8). */
1713    if (offset < -0x800 || offset > 0x7ff ||
1714        (offset & ((1 << vece) - 1)) != 0) {
1715        tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
1716        base = TCG_REG_TMP0;
1717        offset = 0;
1718    }
    offset >>= vece;

    switch (vece) {
    case MO_8:
        if (lasx) {
            tcg_out_opc_xvldrepl_b(s, r, base, offset);
        } else {
            tcg_out_opc_vldrepl_b(s, r, base, offset);
        }
        break;
    case MO_16:
        if (lasx) {
            tcg_out_opc_xvldrepl_h(s, r, base, offset);
        } else {
            tcg_out_opc_vldrepl_h(s, r, base, offset);
        }
        break;
    case MO_32:
        if (lasx) {
            tcg_out_opc_xvldrepl_w(s, r, base, offset);
        } else {
            tcg_out_opc_vldrepl_w(s, r, base, offset);
        }
        break;
    case MO_64:
        if (lasx) {
            tcg_out_opc_xvldrepl_d(s, r, base, offset);
        } else {
            tcg_out_opc_vldrepl_d(s, r, base, offset);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

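/*
 * Load a replicated immediate into a vector register, preferring a
 * single [x]vldi over a round trip through a general register.
 */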
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    /*
     * Try vldi if the immediate fits: with bit 12 of the 13-bit vldi
     * immediate clear, bits 11:10 select the element width and bits 9:0
     * hold a sign-extended value replicated to each element.
     */
    int64_t value = sextract64(v64, 0, 8 << vece);
    if (-0x200 <= value && value <= 0x1FF) {
        uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);

        if (type == TCG_TYPE_V256) {
            tcg_out_opc_xvldi(s, rd, imm);
        } else {
            tcg_out_opc_vldi(s, rd, imm);
        }
        return;
    }

    /* TODO: use the extra vldi patterns available when bit 12 is set. */

    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
    tcg_out_dup_vec(s, type, vece, rd, TCG_REG_TMP0);
}

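/*
 * Emit a vector add or subtract.  A constant operand that fits uimm5
 * (possibly after negating the value and flipping add<->sub) is folded
 * into [x]vaddi/[x]vsubi; everything else uses the register form.
 */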
static void tcg_out_addsub_vec(TCGContext *s, bool lasx, unsigned vece,
                               TCGArg a0, TCGArg a1, TCGArg a2,
                               bool a2_is_const, bool is_add)
{
    static const LoongArchInsn add_vec_insn[2][4] = {
        { OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D },
        { OPC_XVADD_B, OPC_XVADD_H, OPC_XVADD_W, OPC_XVADD_D },
    };
    static const LoongArchInsn add_vec_imm_insn[2][4] = {
        { OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU },
        { OPC_XVADDI_BU, OPC_XVADDI_HU, OPC_XVADDI_WU, OPC_XVADDI_DU },
    };
    static const LoongArchInsn sub_vec_insn[2][4] = {
        { OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D },
        { OPC_XVSUB_B, OPC_XVSUB_H, OPC_XVSUB_W, OPC_XVSUB_D },
    };
    static const LoongArchInsn sub_vec_imm_insn[2][4] = {
        { OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU },
        { OPC_XVSUBI_BU, OPC_XVSUBI_HU, OPC_XVSUBI_WU, OPC_XVSUBI_DU },
    };
    LoongArchInsn insn;

    if (a2_is_const) {
        int64_t value = sextract64(a2, 0, 8 << vece);

        if (!is_add) {
            value = -value;
        }
        if (value < 0) {
            insn = sub_vec_imm_insn[lasx][vece];
            value = -value;
        } else {
            insn = add_vec_imm_insn[lasx][vece];
        }

        /* Constraint TCG_CT_CONST_VADD ensures validity. */
        tcg_debug_assert(0 <= value && value <= 0x1f);

        tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
        return;
    }

    if (is_add) {
        insn = add_vec_insn[lasx][vece];
    } else {
        insn = sub_vec_insn[lasx][vece];
    }
    tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
}

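/*
 * Dispatch a single TCG vector opcode.  Most cases only pick an insn
 * from the tables below and then share one of two encoding tails:
 * vdvjvk (three registers) or vdvjukN (register plus an unsigned
 * immediate whose field width follows the element size).
 */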
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    bool lasx = type == TCG_TYPE_V256;
    TCGArg a0, a1, a2, a3;
    LoongArchInsn insn;

    static const LoongArchInsn cmp_vec_insn[16][2][4] = {
        [TCG_COND_EQ] = {
            { OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D },
            { OPC_XVSEQ_B, OPC_XVSEQ_H, OPC_XVSEQ_W, OPC_XVSEQ_D },
        },
        [TCG_COND_LE] = {
            { OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D },
            { OPC_XVSLE_B, OPC_XVSLE_H, OPC_XVSLE_W, OPC_XVSLE_D },
        },
        [TCG_COND_LEU] = {
            { OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU },
            { OPC_XVSLE_BU, OPC_XVSLE_HU, OPC_XVSLE_WU, OPC_XVSLE_DU },
        },
        [TCG_COND_LT] = {
            { OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D },
            { OPC_XVSLT_B, OPC_XVSLT_H, OPC_XVSLT_W, OPC_XVSLT_D },
        },
        [TCG_COND_LTU] = {
            { OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU },
            { OPC_XVSLT_BU, OPC_XVSLT_HU, OPC_XVSLT_WU, OPC_XVSLT_DU },
        }
    };
    static const LoongArchInsn cmp_vec_imm_insn[16][2][4] = {
        [TCG_COND_EQ] = {
            { OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D },
            { OPC_XVSEQI_B, OPC_XVSEQI_H, OPC_XVSEQI_W, OPC_XVSEQI_D },
        },
        [TCG_COND_LE] = {
            { OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D },
            { OPC_XVSLEI_B, OPC_XVSLEI_H, OPC_XVSLEI_W, OPC_XVSLEI_D },
        },
        [TCG_COND_LEU] = {
            { OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU },
            { OPC_XVSLEI_BU, OPC_XVSLEI_HU, OPC_XVSLEI_WU, OPC_XVSLEI_DU },
        },
        [TCG_COND_LT] = {
            { OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D },
            { OPC_XVSLTI_B, OPC_XVSLTI_H, OPC_XVSLTI_W, OPC_XVSLTI_D },
        },
        [TCG_COND_LTU] = {
            { OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU },
            { OPC_XVSLTI_BU, OPC_XVSLTI_HU, OPC_XVSLTI_WU, OPC_XVSLTI_DU },
        }
    };
    static const LoongArchInsn neg_vec_insn[2][4] = {
        { OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D },
        { OPC_XVNEG_B, OPC_XVNEG_H, OPC_XVNEG_W, OPC_XVNEG_D },
    };
    static const LoongArchInsn mul_vec_insn[2][4] = {
        { OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D },
        { OPC_XVMUL_B, OPC_XVMUL_H, OPC_XVMUL_W, OPC_XVMUL_D },
    };
    static const LoongArchInsn smin_vec_insn[2][4] = {
        { OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D },
        { OPC_XVMIN_B, OPC_XVMIN_H, OPC_XVMIN_W, OPC_XVMIN_D },
    };
    static const LoongArchInsn umin_vec_insn[2][4] = {
        { OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU },
        { OPC_XVMIN_BU, OPC_XVMIN_HU, OPC_XVMIN_WU, OPC_XVMIN_DU },
    };
    static const LoongArchInsn smax_vec_insn[2][4] = {
        { OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D },
        { OPC_XVMAX_B, OPC_XVMAX_H, OPC_XVMAX_W, OPC_XVMAX_D },
    };
    static const LoongArchInsn umax_vec_insn[2][4] = {
        { OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU },
        { OPC_XVMAX_BU, OPC_XVMAX_HU, OPC_XVMAX_WU, OPC_XVMAX_DU },
    };
    static const LoongArchInsn ssadd_vec_insn[2][4] = {
        { OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D },
        { OPC_XVSADD_B, OPC_XVSADD_H, OPC_XVSADD_W, OPC_XVSADD_D },
    };
    static const LoongArchInsn usadd_vec_insn[2][4] = {
        { OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU },
        { OPC_XVSADD_BU, OPC_XVSADD_HU, OPC_XVSADD_WU, OPC_XVSADD_DU },
    };
    static const LoongArchInsn sssub_vec_insn[2][4] = {
        { OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D },
        { OPC_XVSSUB_B, OPC_XVSSUB_H, OPC_XVSSUB_W, OPC_XVSSUB_D },
    };
    static const LoongArchInsn ussub_vec_insn[2][4] = {
        { OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU },
        { OPC_XVSSUB_BU, OPC_XVSSUB_HU, OPC_XVSSUB_WU, OPC_XVSSUB_DU },
    };
    static const LoongArchInsn shlv_vec_insn[2][4] = {
        { OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D },
        { OPC_XVSLL_B, OPC_XVSLL_H, OPC_XVSLL_W, OPC_XVSLL_D },
    };
    static const LoongArchInsn shrv_vec_insn[2][4] = {
        { OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D },
        { OPC_XVSRL_B, OPC_XVSRL_H, OPC_XVSRL_W, OPC_XVSRL_D },
    };
    static const LoongArchInsn sarv_vec_insn[2][4] = {
        { OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D },
        { OPC_XVSRA_B, OPC_XVSRA_H, OPC_XVSRA_W, OPC_XVSRA_D },
    };
    static const LoongArchInsn shli_vec_insn[2][4] = {
        { OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D },
        { OPC_XVSLLI_B, OPC_XVSLLI_H, OPC_XVSLLI_W, OPC_XVSLLI_D },
    };
    static const LoongArchInsn shri_vec_insn[2][4] = {
        { OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D },
        { OPC_XVSRLI_B, OPC_XVSRLI_H, OPC_XVSRLI_W, OPC_XVSRLI_D },
    };
    static const LoongArchInsn sari_vec_insn[2][4] = {
        { OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D },
        { OPC_XVSRAI_B, OPC_XVSRAI_H, OPC_XVSRAI_W, OPC_XVSRAI_D },
    };
    static const LoongArchInsn rotrv_vec_insn[2][4] = {
        { OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D },
        { OPC_XVROTR_B, OPC_XVROTR_H, OPC_XVROTR_W, OPC_XVROTR_D },
    };
    static const LoongArchInsn rotri_vec_insn[2][4] = {
        { OPC_VROTRI_B, OPC_VROTRI_H, OPC_VROTRI_W, OPC_VROTRI_D },
        { OPC_XVROTRI_B, OPC_XVROTRI_H, OPC_XVROTRI_W, OPC_XVROTRI_D },
    };

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    a3 = args[3];

    switch (opc) {
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        break;
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        break;
    case INDEX_op_and_vec:
        insn = lasx ? OPC_XVAND_V : OPC_VAND_V;
        goto vdvjvk;
    case INDEX_op_andc_vec:
        /*
         * vandn vd, vj, vk: vd = vk & ~vj
         * andc_vec vd, vj, vk: vd = vj & ~vk
         * vj and vk are swapped
         */
        a1 = a2;
        a2 = args[1];
        insn = lasx ? OPC_XVANDN_V : OPC_VANDN_V;
        goto vdvjvk;
    case INDEX_op_or_vec:
        insn = lasx ? OPC_XVOR_V : OPC_VOR_V;
        goto vdvjvk;
    case INDEX_op_orc_vec:
        insn = lasx ? OPC_XVORN_V : OPC_VORN_V;
        goto vdvjvk;
    case INDEX_op_xor_vec:
        insn = lasx ? OPC_XVXOR_V : OPC_VXOR_V;
        goto vdvjvk;
    case INDEX_op_not_vec:
        a2 = a1;
        /* fall through */
    case INDEX_op_nor_vec:
        insn = lasx ? OPC_XVNOR_V : OPC_VNOR_V;
        goto vdvjvk;
    case INDEX_op_cmp_vec:
        {
            TCGCond cond = args[3];

            if (const_args[2]) {
                /*
                 * cmp_vec dest, src, value
                 * Try vseqi/vslei/vslti
                 */
                int64_t value = sextract64(a2, 0, 8 << vece);
                if ((cond == TCG_COND_EQ ||
                     cond == TCG_COND_LE ||
                     cond == TCG_COND_LT) &&
                    (-0x10 <= value && value <= 0x0f)) {
                    insn = cmp_vec_imm_insn[cond][lasx][vece];
                    tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
                    break;
                } else if ((cond == TCG_COND_LEU ||
                            cond == TCG_COND_LTU) &&
                           (0x00 <= value && value <= 0x1f)) {
                    insn = cmp_vec_imm_insn[cond][lasx][vece];
                    tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
                    break;
                }

                /*
                 * Fall back to:
                 *   dupi_vec temp, a2
                 *   cmp_vec a0, a1, temp, cond
                 */
                tcg_out_dupi_vec(s, type, vece, TCG_VEC_TMP0, a2);
                a2 = TCG_VEC_TMP0;
            }

            insn = cmp_vec_insn[cond][lasx][vece];
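            /*
             * Only EQ/LE/LEU/LT/LTU have direct encodings; for the
             * remaining conditions, swap the operands and the condition
             * so the tables above apply.
             */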
            if (insn == 0) {
                TCGArg t;
                t = a1, a1 = a2, a2 = t;
                cond = tcg_swap_cond(cond);
                insn = cmp_vec_insn[cond][lasx][vece];
                tcg_debug_assert(insn != 0);
            }
        }
        goto vdvjvk;
    case INDEX_op_add_vec:
        tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], true);
        break;
    case INDEX_op_sub_vec:
        tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], false);
        break;
    case INDEX_op_neg_vec:
        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece], a0, a1));
        break;
    case INDEX_op_mul_vec:
        insn = mul_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_smin_vec:
        insn = smin_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_smax_vec:
        insn = smax_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_umin_vec:
        insn = umin_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_umax_vec:
        insn = umax_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_ssadd_vec:
        insn = ssadd_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_usadd_vec:
        insn = usadd_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_sssub_vec:
        insn = sssub_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_ussub_vec:
        insn = ussub_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_shlv_vec:
        insn = shlv_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_shrv_vec:
        insn = shrv_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_sarv_vec:
        insn = sarv_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_rotlv_vec:
        /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[lasx][vece],
                                      TCG_VEC_TMP0, a2));
        a2 = TCG_VEC_TMP0;
        /* fall through */
    case INDEX_op_rotrv_vec:
        insn = rotrv_vec_insn[lasx][vece];
        goto vdvjvk;
    case INDEX_op_shli_vec:
        insn = shli_vec_insn[lasx][vece];
        goto vdvjukN;
    case INDEX_op_shri_vec:
        insn = shri_vec_insn[lasx][vece];
        goto vdvjukN;
    case INDEX_op_sari_vec:
        insn = sari_vec_insn[lasx][vece];
        goto vdvjukN;
    case INDEX_op_rotli_vec:
        /* rotli_vec a1, a2 = rotri_vec a1, -a2 */
        a2 = extract32(-a2, 0, 3 + vece);
        insn = rotri_vec_insn[lasx][vece];
        goto vdvjukN;
    case INDEX_op_bitsel_vec:
        /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
        if (lasx) {
            tcg_out_opc_xvbitsel_v(s, a0, a3, a2, a1);
        } else {
            tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
        }
        break;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        break;
    default:
        g_assert_not_reached();
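    /*
     * Shared encoding tails: the cases above jump here once insn has
     * been chosen.  vdvjvk emits the three-register form; vdvjukN emits
     * the register+immediate form whose field width tracks vece.
     */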
    vdvjvk:
        tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
        break;
    vdvjukN:
        switch (vece) {
        case MO_8:
            tcg_out32(s, encode_vdvjuk3_insn(insn, a0, a1, a2));
            break;
        case MO_16:
            tcg_out32(s, encode_vdvjuk4_insn(insn, a0, a1, a2));
            break;
        case MO_32:
            tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, a2));
            break;
        case MO_64:
            tcg_out32(s, encode_vdvjuk6_insn(insn, a0, a1, a2));
            break;
        default:
            g_assert_not_reached();
        }
        break;
    }
}

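/*
 * Report which vector opcodes this backend emits directly; returning 0
 * leaves the expansion to the generic TCG vector code.
 */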
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_bitsel_vec:
        return 1;
    default:
        return 0;
    }
}

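/*
 * Never called: tcg_can_emit_vec_op() only ever answers 1 or 0, so the
 * middle-end never asks this backend to expand an opcode by hand.
 */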
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    g_assert_not_reached();
}

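/*
 * Map each opcode to its operand constraint set.  The single-letter
 * keys (r, w, Z, U, ...) come from tcg-target-con-str.h and the C_*
 * sets from tcg-target-con-set.h.
 */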
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_qemu_ld_a32_i128:
    case INDEX_op_qemu_ld_a64_i128:
        return C_N2_I1(r, r, r);

    case INDEX_op_qemu_st_a32_i128:
    case INDEX_op_qemu_st_a64_i128:
        return C_O0_I3(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        return C_O1_I1(r, r);

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        /*
         * LoongArch insns for these ops don't have reg-imm forms, but
         * they can be expressed with andi/ori if ~constant satisfies
         * TCG_CT_CONST_U12.
         */
        return C_O1_I2(r, r, rC);

    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_add_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_add_i64:
        return C_O1_I2(r, r, rJ);

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        /* LoongArch reg-imm bitops have their imms ZERO-extended */
        return C_O1_I2(r, r, rU);

    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, rW);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        /* Must deposit into the same register as input */
        return C_O1_I2(r, 0, rZ);

    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, rZ, ri);
    case INDEX_op_sub_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rZ, rZ);

    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
    case INDEX_op_dup_vec:
        return C_O1_I1(w, r);

    case INDEX_op_st_vec:
        return C_O0_I2(w, r);

    case INDEX_op_cmp_vec:
        return C_O1_I2(w, w, wM);

    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
        return C_O1_I2(w, w, wA);

    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_rotlv_vec:
        return C_O1_I2(w, w, w);

    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_rotli_vec:
        return C_O1_I1(w, w);

    case INDEX_op_bitsel_vec:
        return C_O1_I3(w, w, w, w);

    default:
        g_assert_not_reached();
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,     /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_RA,     /* should be last for ABI compliance */
};

/* Stack frame parameters.  */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

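/*
 * Resulting frame layout, low to high addresses (a sketch; exact sizes
 * depend on TCG_STATIC_CALL_ARGS_SIZE and CPU_TEMP_BUF_NLONGS):
 *
 *   sp + 0           outgoing call arguments  (TCG_STATIC_CALL_ARGS_SIZE)
 *   sp + ...         CPU temp buffer          (TEMP_SIZE)
 *   sp + SAVE_OFS    s0..s9, ra               (SAVE_SIZE)
 *   sp + FRAME_SIZE  caller's frame, rounded to TCG_TARGET_STACK_ALIGN
 */
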
/* We're expecting to be able to use an immediate for frame allocation.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    if (!tcg_use_softmmu && guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
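    /*
     * With rd == zero, jirl is a plain indirect jump: control enters the
     * translated block and only comes back via exit_tb or goto_ptr,
     * which target the epilogue paths emitted below.
     */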

    /* Return path for goto_ptr. Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_out_tb_start(TCGContext *s)
{
    /* nothing to do */
}

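/*
 * One-time backend initialization: probe host capabilities, then set up
 * the available, call-clobbered and reserved register sets.
 */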
static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Server and desktop class cpus have UAL; embedded cpus do not. */
    if (!(hwcap & HWCAP_LOONGARCH_UAL)) {
        error_report("TCG: unaligned access support required; exiting");
        exit(EXIT_FAILURE);
    }

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = ALL_GENERAL_REGS | ALL_VECTOR_REGS;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);

    if (cpuinfo & CPUINFO_LSX) {
        tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
        if (cpuinfo & CPUINFO_LASX) {
            tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
        }
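        /*
         * v24..v31 overlap the callee-saved FP registers fs0..fs7; per
         * the psABI only their low 64 bits survive calls, which is
         * presumably why they are treated as call-preserved here.
         */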
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
        tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
    }

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
}

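/*
 * Minimal DWARF CFI so host debuggers and profilers can unwind through
 * generated code: a single CIE plus one FDE describing the fixed frame
 * laid out by tcg_target_qemu_prologue().
 */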
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_LOONGARCH

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ...  */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 23, 11,                  /* DW_CFA_offset, s0, -88 */
        0x80 + 24, 10,                  /* DW_CFA_offset, s1, -80 */
        0x80 + 25, 9,                   /* DW_CFA_offset, s2, -72 */
        0x80 + 26, 8,                   /* DW_CFA_offset, s3, -64 */
        0x80 + 27, 7,                   /* DW_CFA_offset, s4, -56 */
        0x80 + 28, 6,                   /* DW_CFA_offset, s5, -48 */
        0x80 + 29, 5,                   /* DW_CFA_offset, s6, -40 */
        0x80 + 30, 4,                   /* DW_CFA_offset, s7, -32 */
        0x80 + 31, 3,                   /* DW_CFA_offset, s8, -24 */
        0x80 + 22, 2,                   /* DW_CFA_offset, s9, -16 */
        0x80 + 1,  1,                   /* DW_CFA_offset, ra, -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}