xref: /qemu/tcg/i386/tcg-target.c.inc (revision e99c1f89)
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
    "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
#else
    "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
#endif
    "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7",
#if TCG_TARGET_REG_BITS == 64
    "%xmm8", "%xmm9", "%xmm10", "%xmm11",
    "%xmm12", "%xmm13", "%xmm14", "%xmm15",
#endif
};
#endif

static const int tcg_target_reg_alloc_order[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_RBP,
    TCG_REG_RBX,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_RCX,
    TCG_REG_RDX,
    TCG_REG_RSI,
    TCG_REG_RDI,
    TCG_REG_RAX,
#else
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
    TCG_REG_ECX,
    TCG_REG_EDX,
    TCG_REG_EAX,
#endif
    TCG_REG_XMM0,
    TCG_REG_XMM1,
    TCG_REG_XMM2,
    TCG_REG_XMM3,
    TCG_REG_XMM4,
    TCG_REG_XMM5,
#ifndef _WIN64
    /* The Win64 ABI has xmm6-xmm15 as callee-saved, and we do not save
       any of them.  Therefore only allow xmm0-xmm5 to be allocated.  */
    TCG_REG_XMM6,
    TCG_REG_XMM7,
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_XMM8,
    TCG_REG_XMM9,
    TCG_REG_XMM10,
    TCG_REG_XMM11,
    TCG_REG_XMM12,
    TCG_REG_XMM13,
    TCG_REG_XMM14,
    TCG_REG_XMM15,
#endif
#endif
};

static const int tcg_target_call_iarg_regs[] = {
#if TCG_TARGET_REG_BITS == 64
#if defined(_WIN64)
    TCG_REG_RCX,
    TCG_REG_RDX,
#else
    TCG_REG_RDI,
    TCG_REG_RSI,
    TCG_REG_RDX,
    TCG_REG_RCX,
#endif
    TCG_REG_R8,
    TCG_REG_R9,
#else
    /* 32-bit mode uses a stack-based calling convention (GCC default). */
#endif
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    switch (kind) {
    case TCG_CALL_RET_NORMAL:
        tcg_debug_assert(slot >= 0 && slot <= 1);
        return slot ? TCG_REG_EDX : TCG_REG_EAX;
#ifdef _WIN64
    case TCG_CALL_RET_BY_VEC:
        tcg_debug_assert(slot == 0);
        return TCG_REG_XMM0;
#endif
    default:
        g_assert_not_reached();
    }
}

/* Constants we accept.  */
#define TCG_CT_CONST_S32 0x100
#define TCG_CT_CONST_U32 0x200
#define TCG_CT_CONST_I32 0x400
#define TCG_CT_CONST_WSZ 0x800

/* Registers used with the L constraint, which are the first two argument
   registers on x86_64, and two arbitrary call-clobbered registers on
   i386. */
#if TCG_TARGET_REG_BITS == 64
# define TCG_REG_L0 tcg_target_call_iarg_regs[0]
# define TCG_REG_L1 tcg_target_call_iarg_regs[1]
#else
# define TCG_REG_L0 TCG_REG_EAX
# define TCG_REG_L1 TCG_REG_EDX
#endif

#define ALL_BYTEH_REGS         0x0000000fu
#if TCG_TARGET_REG_BITS == 64
# define ALL_GENERAL_REGS      0x0000ffffu
# define ALL_VECTOR_REGS       0xffff0000u
# define ALL_BYTEL_REGS        ALL_GENERAL_REGS
#else
# define ALL_GENERAL_REGS      0x000000ffu
# define ALL_VECTOR_REGS       0x00ff0000u
# define ALL_BYTEL_REGS        ALL_BYTEH_REGS
#endif
#ifdef CONFIG_SOFTMMU
# define SOFTMMU_RESERVE_REGS  ((1 << TCG_REG_L0) | (1 << TCG_REG_L1))
#else
# define SOFTMMU_RESERVE_REGS  0
#endif

/* The host compiler should supply <cpuid.h> to enable runtime feature
   detection, as we're not going to go so far as writing our own inline
   assembly.  If it is not available, default values will be assumed.  */
#if defined(CONFIG_CPUID_H)
#include "qemu/cpuid.h"
#endif

/* For 64-bit, we always know that CMOV is available.  */
#if TCG_TARGET_REG_BITS == 64
# define have_cmov 1
#elif defined(CONFIG_CPUID_H)
static bool have_cmov;
#else
# define have_cmov 0
#endif

/* We need these symbols in tcg-target.h, and we can't properly conditionalize
   them there.  Therefore we always define the variables.  */
bool have_bmi1;
bool have_popcnt;
bool have_avx1;
bool have_avx2;
bool have_avx512bw;
bool have_avx512dq;
bool have_avx512vbmi2;
bool have_avx512vl;
bool have_movbe;

#ifdef CONFIG_CPUID_H
static bool have_bmi2;
static bool have_lzcnt;
#else
# define have_bmi2 0
# define have_lzcnt 0
#endif

static const tcg_insn_unit *tb_ret_addr;

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    value += addend;
    switch (type) {
    case R_386_PC32:
        value -= (uintptr_t)tcg_splitwx_to_rx(code_ptr);
        if (value != (int32_t)value) {
            return false;
        }
        /* FALLTHRU */
    case R_386_32:
        tcg_patch32(code_ptr, value);
        break;
    case R_386_PC8:
        value -= (uintptr_t)tcg_splitwx_to_rx(code_ptr);
        if (value != (int8_t)value) {
            return false;
        }
        tcg_patch8(code_ptr, value);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if (type == TCG_TYPE_I32) {
        if (ct & (TCG_CT_CONST_S32 | TCG_CT_CONST_U32 | TCG_CT_CONST_I32)) {
            return 1;
        }
    } else {
        if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
            return 1;
        }
        if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
            return 1;
        }
        if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
            return 1;
        }
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return 1;
    }
    return 0;
}
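
/* As a worked illustration of the checks above (values chosen by the
   editor, not from the original source): on a 64-bit host,
   val = 0x00000000deadbeef matches U32 (it zero-extends from 32 bits)
   but not S32, while val = 0xffffffffdeadbeef matches S32 (it
   sign-extends) but not U32.  TCG_CT_CONST_WSZ matches only the
   operation width itself (32 or 64), as used by clz/ctz below when the
   "not found" result equals the width.  */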

# define LOWREGMASK(x)	((x) & 7)

#define P_EXT		0x100		/* 0x0f opcode prefix */
#define P_EXT38         0x200           /* 0x0f 0x38 opcode prefix */
#define P_DATA16        0x400           /* 0x66 opcode prefix */
#define P_VEXW          0x1000          /* Set VEX.W = 1 */
#if TCG_TARGET_REG_BITS == 64
# define P_REXW         P_VEXW          /* Set REX.W = 1; match VEXW */
# define P_REXB_R       0x2000          /* REG field as byte register */
# define P_REXB_RM      0x4000          /* R/M field as byte register */
# define P_GS           0x8000          /* gs segment override */
#else
# define P_REXW		0
# define P_REXB_R	0
# define P_REXB_RM	0
# define P_GS           0
#endif
#define P_EXT3A         0x10000         /* 0x0f 0x3a opcode prefix */
#define P_SIMDF3        0x20000         /* 0xf3 opcode prefix */
#define P_SIMDF2        0x40000         /* 0xf2 opcode prefix */
#define P_VEXL          0x80000         /* Set VEX.L = 1 */
#define P_EVEX          0x100000        /* Requires EVEX encoding */

#define OPC_ARITH_EvIz	(0x81)
#define OPC_ARITH_EvIb	(0x83)
#define OPC_ARITH_GvEv	(0x03)		/* ... plus (ARITH_FOO << 3) */
#define OPC_ANDN        (0xf2 | P_EXT38)
#define OPC_ADD_GvEv	(OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_AND_GvEv    (OPC_ARITH_GvEv | (ARITH_AND << 3))
#define OPC_BLENDPS     (0x0c | P_EXT3A | P_DATA16)
#define OPC_BSF         (0xbc | P_EXT)
#define OPC_BSR         (0xbd | P_EXT)
#define OPC_BSWAP	(0xc8 | P_EXT)
#define OPC_CALL_Jz	(0xe8)
#define OPC_CMOVCC      (0x40 | P_EXT)  /* ... plus condition code */
#define OPC_CMP_GvEv	(OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32	(0x48)
#define OPC_IMUL_GvEv	(0xaf | P_EXT)
#define OPC_IMUL_GvEvIb	(0x6b)
#define OPC_IMUL_GvEvIz	(0x69)
#define OPC_INC_r32	(0x40)
#define OPC_JCC_long	(0x80 | P_EXT)	/* ... plus condition code */
#define OPC_JCC_short	(0x70)		/* ... plus condition code */
#define OPC_JMP_long	(0xe9)
#define OPC_JMP_short	(0xeb)
#define OPC_LEA         (0x8d)
#define OPC_LZCNT       (0xbd | P_EXT | P_SIMDF3)
#define OPC_MOVB_EvGv	(0x88)		/* stores, more or less */
#define OPC_MOVL_EvGv	(0x89)		/* stores, more or less */
#define OPC_MOVL_GvEv	(0x8b)		/* loads, more or less */
#define OPC_MOVB_EvIz   (0xc6)
#define OPC_MOVL_EvIz	(0xc7)
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVBE_GyMy  (0xf0 | P_EXT38)
#define OPC_MOVBE_MyGy  (0xf1 | P_EXT38)
#define OPC_MOVD_VyEy   (0x6e | P_EXT | P_DATA16)
#define OPC_MOVD_EyVy   (0x7e | P_EXT | P_DATA16)
#define OPC_MOVDDUP     (0x12 | P_EXT | P_SIMDF2)
#define OPC_MOVDQA_VxWx (0x6f | P_EXT | P_DATA16)
#define OPC_MOVDQA_WxVx (0x7f | P_EXT | P_DATA16)
#define OPC_MOVDQU_VxWx (0x6f | P_EXT | P_SIMDF3)
#define OPC_MOVDQU_WxVx (0x7f | P_EXT | P_SIMDF3)
#define OPC_MOVQ_VqWq   (0x7e | P_EXT | P_SIMDF3)
#define OPC_MOVQ_WqVq   (0xd6 | P_EXT | P_DATA16)
#define OPC_MOVSBL	(0xbe | P_EXT)
#define OPC_MOVSWL	(0xbf | P_EXT)
#define OPC_MOVSLQ	(0x63 | P_REXW)
#define OPC_MOVZBL	(0xb6 | P_EXT)
#define OPC_MOVZWL	(0xb7 | P_EXT)
#define OPC_PABSB       (0x1c | P_EXT38 | P_DATA16)
#define OPC_PABSW       (0x1d | P_EXT38 | P_DATA16)
#define OPC_PABSD       (0x1e | P_EXT38 | P_DATA16)
#define OPC_VPABSQ      (0x1f | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_PACKSSDW    (0x6b | P_EXT | P_DATA16)
#define OPC_PACKSSWB    (0x63 | P_EXT | P_DATA16)
#define OPC_PACKUSDW    (0x2b | P_EXT38 | P_DATA16)
#define OPC_PACKUSWB    (0x67 | P_EXT | P_DATA16)
#define OPC_PADDB       (0xfc | P_EXT | P_DATA16)
#define OPC_PADDW       (0xfd | P_EXT | P_DATA16)
#define OPC_PADDD       (0xfe | P_EXT | P_DATA16)
#define OPC_PADDQ       (0xd4 | P_EXT | P_DATA16)
#define OPC_PADDSB      (0xec | P_EXT | P_DATA16)
#define OPC_PADDSW      (0xed | P_EXT | P_DATA16)
#define OPC_PADDUB      (0xdc | P_EXT | P_DATA16)
#define OPC_PADDUW      (0xdd | P_EXT | P_DATA16)
#define OPC_PAND        (0xdb | P_EXT | P_DATA16)
#define OPC_PANDN       (0xdf | P_EXT | P_DATA16)
#define OPC_PBLENDW     (0x0e | P_EXT3A | P_DATA16)
#define OPC_PCMPEQB     (0x74 | P_EXT | P_DATA16)
#define OPC_PCMPEQW     (0x75 | P_EXT | P_DATA16)
#define OPC_PCMPEQD     (0x76 | P_EXT | P_DATA16)
#define OPC_PCMPEQQ     (0x29 | P_EXT38 | P_DATA16)
#define OPC_PCMPGTB     (0x64 | P_EXT | P_DATA16)
#define OPC_PCMPGTW     (0x65 | P_EXT | P_DATA16)
#define OPC_PCMPGTD     (0x66 | P_EXT | P_DATA16)
#define OPC_PCMPGTQ     (0x37 | P_EXT38 | P_DATA16)
#define OPC_PMAXSB      (0x3c | P_EXT38 | P_DATA16)
#define OPC_PMAXSW      (0xee | P_EXT | P_DATA16)
#define OPC_PMAXSD      (0x3d | P_EXT38 | P_DATA16)
#define OPC_VPMAXSQ     (0x3d | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_PMAXUB      (0xde | P_EXT | P_DATA16)
#define OPC_PMAXUW      (0x3e | P_EXT38 | P_DATA16)
#define OPC_PMAXUD      (0x3f | P_EXT38 | P_DATA16)
#define OPC_VPMAXUQ     (0x3f | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_PMINSB      (0x38 | P_EXT38 | P_DATA16)
#define OPC_PMINSW      (0xea | P_EXT | P_DATA16)
#define OPC_PMINSD      (0x39 | P_EXT38 | P_DATA16)
#define OPC_VPMINSQ     (0x39 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_PMINUB      (0xda | P_EXT | P_DATA16)
#define OPC_PMINUW      (0x3a | P_EXT38 | P_DATA16)
#define OPC_PMINUD      (0x3b | P_EXT38 | P_DATA16)
#define OPC_VPMINUQ     (0x3b | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_PMOVSXBW    (0x20 | P_EXT38 | P_DATA16)
#define OPC_PMOVSXWD    (0x23 | P_EXT38 | P_DATA16)
#define OPC_PMOVSXDQ    (0x25 | P_EXT38 | P_DATA16)
#define OPC_PMOVZXBW    (0x30 | P_EXT38 | P_DATA16)
#define OPC_PMOVZXWD    (0x33 | P_EXT38 | P_DATA16)
#define OPC_PMOVZXDQ    (0x35 | P_EXT38 | P_DATA16)
#define OPC_PMULLW      (0xd5 | P_EXT | P_DATA16)
#define OPC_PMULLD      (0x40 | P_EXT38 | P_DATA16)
#define OPC_VPMULLQ     (0x40 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_POR         (0xeb | P_EXT | P_DATA16)
#define OPC_PSHUFB      (0x00 | P_EXT38 | P_DATA16)
#define OPC_PSHUFD      (0x70 | P_EXT | P_DATA16)
#define OPC_PSHUFLW     (0x70 | P_EXT | P_SIMDF2)
#define OPC_PSHUFHW     (0x70 | P_EXT | P_SIMDF3)
#define OPC_PSHIFTW_Ib  (0x71 | P_EXT | P_DATA16) /* /2 /6 /4 */
#define OPC_PSHIFTD_Ib  (0x72 | P_EXT | P_DATA16) /* /1 /2 /6 /4 */
#define OPC_PSHIFTQ_Ib  (0x73 | P_EXT | P_DATA16) /* /2 /6 /4 */
#define OPC_PSLLW       (0xf1 | P_EXT | P_DATA16)
#define OPC_PSLLD       (0xf2 | P_EXT | P_DATA16)
#define OPC_PSLLQ       (0xf3 | P_EXT | P_DATA16)
#define OPC_PSRAW       (0xe1 | P_EXT | P_DATA16)
#define OPC_PSRAD       (0xe2 | P_EXT | P_DATA16)
#define OPC_VPSRAQ      (0xe2 | P_EXT | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_PSRLW       (0xd1 | P_EXT | P_DATA16)
#define OPC_PSRLD       (0xd2 | P_EXT | P_DATA16)
#define OPC_PSRLQ       (0xd3 | P_EXT | P_DATA16)
#define OPC_PSUBB       (0xf8 | P_EXT | P_DATA16)
#define OPC_PSUBW       (0xf9 | P_EXT | P_DATA16)
#define OPC_PSUBD       (0xfa | P_EXT | P_DATA16)
#define OPC_PSUBQ       (0xfb | P_EXT | P_DATA16)
#define OPC_PSUBSB      (0xe8 | P_EXT | P_DATA16)
#define OPC_PSUBSW      (0xe9 | P_EXT | P_DATA16)
#define OPC_PSUBUB      (0xd8 | P_EXT | P_DATA16)
#define OPC_PSUBUW      (0xd9 | P_EXT | P_DATA16)
#define OPC_PUNPCKLBW   (0x60 | P_EXT | P_DATA16)
#define OPC_PUNPCKLWD   (0x61 | P_EXT | P_DATA16)
#define OPC_PUNPCKLDQ   (0x62 | P_EXT | P_DATA16)
#define OPC_PUNPCKLQDQ  (0x6c | P_EXT | P_DATA16)
#define OPC_PUNPCKHBW   (0x68 | P_EXT | P_DATA16)
#define OPC_PUNPCKHWD   (0x69 | P_EXT | P_DATA16)
#define OPC_PUNPCKHDQ   (0x6a | P_EXT | P_DATA16)
#define OPC_PUNPCKHQDQ  (0x6d | P_EXT | P_DATA16)
#define OPC_PXOR        (0xef | P_EXT | P_DATA16)
#define OPC_POP_r32	(0x58)
#define OPC_POPCNT      (0xb8 | P_EXT | P_SIMDF3)
#define OPC_PUSH_r32	(0x50)
#define OPC_PUSH_Iv	(0x68)
#define OPC_PUSH_Ib	(0x6a)
#define OPC_RET		(0xc3)
#define OPC_SETCC	(0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
#define OPC_SHIFT_1	(0xd1)
#define OPC_SHIFT_Ib	(0xc1)
#define OPC_SHIFT_cl	(0xd3)
#define OPC_SARX        (0xf7 | P_EXT38 | P_SIMDF3)
#define OPC_SHUFPS      (0xc6 | P_EXT)
#define OPC_SHLX        (0xf7 | P_EXT38 | P_DATA16)
#define OPC_SHRX        (0xf7 | P_EXT38 | P_SIMDF2)
#define OPC_SHRD_Ib     (0xac | P_EXT)
#define OPC_TESTL	(0x85)
#define OPC_TZCNT       (0xbc | P_EXT | P_SIMDF3)
#define OPC_UD2         (0x0b | P_EXT)
#define OPC_VPBLENDD    (0x02 | P_EXT3A | P_DATA16)
#define OPC_VPBLENDVB   (0x4c | P_EXT3A | P_DATA16)
#define OPC_VPINSRB     (0x20 | P_EXT3A | P_DATA16)
#define OPC_VPINSRW     (0xc4 | P_EXT | P_DATA16)
#define OPC_VBROADCASTSS (0x18 | P_EXT38 | P_DATA16)
#define OPC_VBROADCASTSD (0x19 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTB (0x78 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16)
#define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16)
#define OPC_VPERMQ      (0x00 | P_EXT3A | P_DATA16 | P_VEXW)
#define OPC_VPERM2I128  (0x46 | P_EXT3A | P_DATA16 | P_VEXL)
#define OPC_VPROLVD     (0x15 | P_EXT38 | P_DATA16 | P_EVEX)
#define OPC_VPROLVQ     (0x15 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPRORVD     (0x14 | P_EXT38 | P_DATA16 | P_EVEX)
#define OPC_VPRORVQ     (0x14 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSHLDW     (0x70 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSHLDD     (0x71 | P_EXT3A | P_DATA16 | P_EVEX)
#define OPC_VPSHLDQ     (0x71 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSHLDVW    (0x70 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSHLDVD    (0x71 | P_EXT38 | P_DATA16 | P_EVEX)
#define OPC_VPSHLDVQ    (0x71 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSHRDVW    (0x72 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSHRDVD    (0x73 | P_EXT38 | P_DATA16 | P_EVEX)
#define OPC_VPSHRDVQ    (0x73 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSLLVW     (0x12 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSLLVD     (0x47 | P_EXT38 | P_DATA16)
#define OPC_VPSLLVQ     (0x47 | P_EXT38 | P_DATA16 | P_VEXW)
#define OPC_VPSRAVW     (0x11 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSRAVD     (0x46 | P_EXT38 | P_DATA16)
#define OPC_VPSRAVQ     (0x46 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSRLVW     (0x10 | P_EXT38 | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VPSRLVD     (0x45 | P_EXT38 | P_DATA16)
#define OPC_VPSRLVQ     (0x45 | P_EXT38 | P_DATA16 | P_VEXW)
#define OPC_VPTERNLOGQ  (0x25 | P_EXT3A | P_DATA16 | P_VEXW | P_EVEX)
#define OPC_VZEROUPPER  (0x77 | P_EXT)
#define OPC_XCHG_ax_r32	(0x90)
#define OPC_XCHG_EvGv   (0x87)

#define OPC_GRP3_Eb     (0xf6)
#define OPC_GRP3_Ev     (0xf7)
#define OPC_GRP5        (0xff)
#define OPC_GRP14       (0x73 | P_EXT | P_DATA16)

/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH.  */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3.  */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3.  */
#define EXT3_TESTi 0
#define EXT3_NOT   2
#define EXT3_NEG   3
#define EXT3_MUL   4
#define EXT3_IMUL  5
#define EXT3_DIV   6
#define EXT3_IDIV  7

/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5.  */
#define EXT5_INC_Ev	0
#define EXT5_DEC_Ev	1
#define EXT5_CALLN_Ev	2
#define EXT5_JMPN_Ev	4

/* Condition codes to be added to OPC_JCC_{long,short}.  */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

static const uint8_t tcg_cond_to_jcc[] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
{
    int rex;

    if (opc & P_GS) {
        tcg_out8(s, 0x65);
    }
    if (opc & P_DATA16) {
        /* We should never be asking for both 16 and 64-bit operation.  */
        tcg_debug_assert((opc & P_REXW) == 0);
        tcg_out8(s, 0x66);
    }
    if (opc & P_SIMDF3) {
        tcg_out8(s, 0xf3);
    } else if (opc & P_SIMDF2) {
        tcg_out8(s, 0xf2);
    }

    rex = 0;
    rex |= (opc & P_REXW) ? 0x8 : 0x0;  /* REX.W */
    rex |= (r & 8) >> 1;                /* REX.R */
    rex |= (x & 8) >> 2;                /* REX.X */
    rex |= (rm & 8) >> 3;               /* REX.B */

    /* P_REXB_{R,RM} indicates that the given register is the low byte.
       For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
       as otherwise the encoding indicates %[abcd]h.  Note that the values
       that are ORed in merely indicate that the REX byte must be present;
       those bits get discarded in output.  */
    rex |= opc & (r >= 4 ? P_REXB_R : 0);
    rex |= opc & (rm >= 4 ? P_REXB_RM : 0);

    if (rex) {
        tcg_out8(s, (uint8_t)(rex | 0x40));
    }

    if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
        tcg_out8(s, 0x0f);
        if (opc & P_EXT38) {
            tcg_out8(s, 0x38);
        } else if (opc & P_EXT3A) {
            tcg_out8(s, 0x3a);
        }
    }

    tcg_out8(s, opc);
}
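
/* A concrete encoding, for illustration: with opc = OPC_MOVL_GvEv |
   P_REXW, r = %r8, rm = %rax, the REX byte computed above is
   0x40 | W(0x8) | R(0x4) = 0x4c, followed by the opcode byte 0x8b;
   together with the ModRM byte 0xc0 appended by tcg_out_modrm below,
   this yields "4c 8b c0", i.e. movq %rax, %r8.  */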
#else
static void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_DATA16) {
        tcg_out8(s, 0x66);
    }
    if (opc & P_SIMDF3) {
        tcg_out8(s, 0xf3);
    } else if (opc & P_SIMDF2) {
        tcg_out8(s, 0xf2);
    }
    if (opc & (P_EXT | P_EXT38 | P_EXT3A)) {
        tcg_out8(s, 0x0f);
        if (opc & P_EXT38) {
            tcg_out8(s, 0x38);
        } else if (opc & P_EXT3A) {
            tcg_out8(s, 0x3a);
        }
    }
    tcg_out8(s, opc);
}
/* Discard the register arguments to tcg_out_opc early, so as not to penalize
   the 32-bit compilation paths.  This method works with all versions of gcc,
   whereas relying on optimization may not be able to exclude them.  */
#define tcg_out_opc(s, opc, r, rm, x)  (tcg_out_opc)(s, opc)
#endif

static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc, r, rm, 0);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}

static void tcg_out_vex_opc(TCGContext *s, int opc, int r, int v,
                            int rm, int index)
{
    int tmp;

    /* Use the two byte form if possible, which cannot encode
       VEX.W, VEX.B, VEX.X, or an m-mmmm field other than P_EXT.  */
    if ((opc & (P_EXT | P_EXT38 | P_EXT3A | P_VEXW)) == P_EXT
        && ((rm | index) & 8) == 0) {
        /* Two byte VEX prefix.  */
        tcg_out8(s, 0xc5);

        tmp = (r & 8 ? 0 : 0x80);              /* VEX.R */
    } else {
        /* Three byte VEX prefix.  */
        tcg_out8(s, 0xc4);

        /* VEX.m-mmmm */
        if (opc & P_EXT3A) {
            tmp = 3;
        } else if (opc & P_EXT38) {
            tmp = 2;
        } else if (opc & P_EXT) {
            tmp = 1;
        } else {
            g_assert_not_reached();
        }
        tmp |= (r & 8 ? 0 : 0x80);             /* VEX.R */
        tmp |= (index & 8 ? 0 : 0x40);         /* VEX.X */
        tmp |= (rm & 8 ? 0 : 0x20);            /* VEX.B */
        tcg_out8(s, tmp);

        tmp = (opc & P_VEXW ? 0x80 : 0);       /* VEX.W */
    }

    tmp |= (opc & P_VEXL ? 0x04 : 0);      /* VEX.L */
    /* VEX.pp */
    if (opc & P_DATA16) {
        tmp |= 1;                          /* 0x66 */
    } else if (opc & P_SIMDF3) {
        tmp |= 2;                          /* 0xf3 */
    } else if (opc & P_SIMDF2) {
        tmp |= 3;                          /* 0xf2 */
    }
    tmp |= (~v & 15) << 3;                 /* VEX.vvvv */
    tcg_out8(s, tmp);
    tcg_out8(s, opc);
}
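
/* For illustration: "vpxor %xmm3, %xmm2, %xmm1" takes the two-byte
   form above: 0xc5, then the byte built from inverted R (1), inverted
   vvvv = ~2 = 0b1101, L = 0 and pp = 01, i.e. 0xe9, then the opcode
   0xef; tcg_out_vex_modrm below appends ModRM 0xcb.  */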

static void tcg_out_evex_opc(TCGContext *s, int opc, int r, int v,
                             int rm, int index)
{
    /* The entire 4-byte evex prefix; with R' and V' set. */
    uint32_t p = 0x08041062;
    int mm, pp;

    tcg_debug_assert(have_avx512vl);

    /* EVEX.mm */
    if (opc & P_EXT3A) {
        mm = 3;
    } else if (opc & P_EXT38) {
        mm = 2;
    } else if (opc & P_EXT) {
        mm = 1;
    } else {
        g_assert_not_reached();
    }

    /* EVEX.pp */
    if (opc & P_DATA16) {
        pp = 1;                          /* 0x66 */
    } else if (opc & P_SIMDF3) {
        pp = 2;                          /* 0xf3 */
    } else if (opc & P_SIMDF2) {
        pp = 3;                          /* 0xf2 */
    } else {
        pp = 0;
    }

    p = deposit32(p, 8, 2, mm);
    p = deposit32(p, 13, 1, (rm & 8) == 0);             /* EVEX.RXB.B */
    p = deposit32(p, 14, 1, (index & 8) == 0);          /* EVEX.RXB.X */
    p = deposit32(p, 15, 1, (r & 8) == 0);              /* EVEX.RXB.R */
    p = deposit32(p, 16, 2, pp);
    p = deposit32(p, 19, 4, ~v);
    p = deposit32(p, 23, 1, (opc & P_VEXW) != 0);
    p = deposit32(p, 29, 2, (opc & P_VEXL) != 0);

    tcg_out32(s, p);
    tcg_out8(s, opc);
}
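
/* Decoding the seed constant above, for reference: 0x08041062 is
   emitted little-endian as 62 10 04 08 -- the 0x62 EVEX escape byte,
   P0 with EVEX.R' preset (0x10; the high-16 register bank is unused
   here), P1 with its mandatory "1" bit (0x04), and P2 with EVEX.V'
   preset (0x08).  The deposit32 calls then fill in mm, RXB, pp, vvvv,
   W and L'L.  */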

static void tcg_out_vex_modrm(TCGContext *s, int opc, int r, int v, int rm)
{
    if (opc & P_EVEX) {
        tcg_out_evex_opc(s, opc, r, v, rm, 0);
    } else {
        tcg_out_vex_opc(s, opc, r, v, rm, 0);
    }
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}

/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
   Either RM or INDEX may be omitted, indicated by a negative value.  In
   64-bit mode, for absolute addresses, ~RM is the size of the immediate
   operand that will follow the instruction.  */
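
/* For example (illustrative values): rm = -1 requests a plain
   rip-relative reference with no trailing immediate (~(-1) == 0),
   while an insn followed by a 4-byte immediate would pass rm = ~4, so
   that the displacement is computed relative to the end of that
   immediate.  */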

static void tcg_out_sib_offset(TCGContext *s, int r, int rm, int index,
                               int shift, intptr_t offset)
{
    int mod, len;

    if (index < 0 && rm < 0) {
        if (TCG_TARGET_REG_BITS == 64) {
            /* Try for a rip-relative addressing mode.  This has replaced
               the 32-bit-mode absolute addressing encoding.  */
            intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
            intptr_t disp = offset - pc;
            if (disp == (int32_t)disp) {
                tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
                tcg_out32(s, disp);
                return;
            }

            /* Try for an absolute address encoding.  This requires the
               use of the MODRM+SIB encoding and is therefore larger than
               rip-relative addressing.  */
            if (offset == (int32_t)offset) {
                tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
                tcg_out8(s, (4 << 3) | 5);
                tcg_out32(s, offset);
                return;
            }

            /* ??? The memory isn't directly addressable.  */
            g_assert_not_reached();
        } else {
            /* Absolute address.  */
            tcg_out8(s, (r << 3) | 5);
            tcg_out32(s, offset);
            return;
        }
    }

    /* Find the length of the immediate addend.  Note that the encoding
       that would be used for (%ebp) indicates absolute addressing.  */
    if (rm < 0) {
        mod = 0, len = 4, rm = 5;
    } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
        mod = 0, len = 0;
    } else if (offset == (int8_t)offset) {
        mod = 0x40, len = 1;
    } else {
        mod = 0x80, len = 4;
    }

    /* Use a single byte MODRM format if possible.  Note that the encoding
       that would be used for %esp is the escape to the two byte form.  */
    if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
        /* Single byte MODRM format.  */
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
    } else {
        /* Two byte MODRM+SIB format.  */

        /* Note that the encoding that would place %esp into the index
           field indicates no index register.  In 64-bit mode, the REX.X
           bit counts, so %r12 can be used as the index.  */
        if (index < 0) {
            index = 4;
        } else {
            tcg_debug_assert(index != TCG_REG_ESP);
        }

        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
        tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
    }

    if (len == 1) {
        tcg_out8(s, offset);
    } else if (len == 4) {
        tcg_out32(s, offset);
    }
}

static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
                                     int index, int shift, intptr_t offset)
{
    tcg_out_opc(s, opc, r, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
    tcg_out_sib_offset(s, r, rm, index, shift, offset);
}

static void tcg_out_vex_modrm_sib_offset(TCGContext *s, int opc, int r, int v,
                                         int rm, int index, int shift,
                                         intptr_t offset)
{
    tcg_out_vex_opc(s, opc, r, v, rm < 0 ? 0 : rm, index < 0 ? 0 : index);
    tcg_out_sib_offset(s, r, rm, index, shift, offset);
}

/* A simplification of the above with no index or shift.  */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
                                        int rm, intptr_t offset)
{
    tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}

static inline void tcg_out_vex_modrm_offset(TCGContext *s, int opc, int r,
                                            int v, int rm, intptr_t offset)
{
    tcg_out_vex_modrm_sib_offset(s, opc, r, v, rm, -1, 0, offset);
}

/* Output an opcode with an expected reference to the constant pool.  */
static inline void tcg_out_modrm_pool(TCGContext *s, int opc, int r)
{
    tcg_out_opc(s, opc, r, 0, 0);
    /* Absolute for 32-bit, pc-relative for 64-bit.  */
    tcg_out8(s, LOWREGMASK(r) << 3 | 5);
    tcg_out32(s, 0);
}

/* Output an opcode with an expected reference to the constant pool.  */
static inline void tcg_out_vex_modrm_pool(TCGContext *s, int opc, int r)
{
    tcg_out_vex_opc(s, opc, r, 0, 0, 0);
    /* Absolute for 32-bit, pc-relative for 64-bit.  */
    tcg_out8(s, LOWREGMASK(r) << 3 | 5);
    tcg_out32(s, 0);
}

/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi.  */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
    /* Propagate an opcode prefix, such as P_REXW.  */
    int ext = subop & ~0x7;
    subop &= 0x7;

    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    int rexw = 0;

    if (arg == ret) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I64:
        rexw = P_REXW;
        /* fallthru */
    case TCG_TYPE_I32:
        if (ret < 16) {
            if (arg < 16) {
                tcg_out_modrm(s, OPC_MOVL_GvEv + rexw, ret, arg);
            } else {
                tcg_out_vex_modrm(s, OPC_MOVD_EyVy + rexw, arg, 0, ret);
            }
        } else {
            if (arg < 16) {
                tcg_out_vex_modrm(s, OPC_MOVD_VyEy + rexw, ret, 0, arg);
            } else {
                tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg);
            }
        }
        break;

    case TCG_TYPE_V64:
        tcg_debug_assert(ret >= 16 && arg >= 16);
        tcg_out_vex_modrm(s, OPC_MOVQ_VqWq, ret, 0, arg);
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= 16 && arg >= 16);
        tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx, ret, 0, arg);
        break;
    case TCG_TYPE_V256:
        tcg_debug_assert(ret >= 16 && arg >= 16);
        tcg_out_vex_modrm(s, OPC_MOVDQA_VxWx | P_VEXL, ret, 0, arg);
        break;

    default:
        g_assert_not_reached();
    }
    return true;
}

static const int avx2_dup_insn[4] = {
    OPC_VPBROADCASTB, OPC_VPBROADCASTW,
    OPC_VPBROADCASTD, OPC_VPBROADCASTQ,
};

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg r, TCGReg a)
{
    if (have_avx2) {
        int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
        tcg_out_vex_modrm(s, avx2_dup_insn[vece] + vex_l, r, 0, a);
    } else {
        switch (vece) {
        case MO_8:
            /* ??? With zero in a register, use PSHUFB.  */
            tcg_out_vex_modrm(s, OPC_PUNPCKLBW, r, a, a);
            a = r;
            /* FALLTHRU */
        case MO_16:
            tcg_out_vex_modrm(s, OPC_PUNPCKLWD, r, a, a);
            a = r;
            /* FALLTHRU */
        case MO_32:
            tcg_out_vex_modrm(s, OPC_PSHUFD, r, 0, a);
            /* imm8 operand: all output lanes selected from input lane 0.  */
            tcg_out8(s, 0);
            break;
        case MO_64:
            tcg_out_vex_modrm(s, OPC_PUNPCKLQDQ, r, a, a);
            break;
        default:
            g_assert_not_reached();
        }
    }
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg r, TCGReg base, intptr_t offset)
{
    if (have_avx2) {
        int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);
        tcg_out_vex_modrm_offset(s, avx2_dup_insn[vece] + vex_l,
                                 r, 0, base, offset);
    } else {
        switch (vece) {
        case MO_64:
            tcg_out_vex_modrm_offset(s, OPC_MOVDDUP, r, 0, base, offset);
            break;
        case MO_32:
            tcg_out_vex_modrm_offset(s, OPC_VBROADCASTSS, r, 0, base, offset);
            break;
        case MO_16:
            tcg_out_vex_modrm_offset(s, OPC_VPINSRW, r, r, base, offset);
            tcg_out8(s, 0); /* imm8 */
            tcg_out_dup_vec(s, type, vece, r, r);
            break;
        case MO_8:
            tcg_out_vex_modrm_offset(s, OPC_VPINSRB, r, r, base, offset);
            tcg_out8(s, 0); /* imm8 */
            tcg_out_dup_vec(s, type, vece, r, r);
            break;
        default:
            g_assert_not_reached();
        }
    }
    return true;
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg ret, int64_t arg)
{
    int vex_l = (type == TCG_TYPE_V256 ? P_VEXL : 0);

    if (arg == 0) {
        tcg_out_vex_modrm(s, OPC_PXOR, ret, ret, ret);
        return;
    }
    if (arg == -1) {
        tcg_out_vex_modrm(s, OPC_PCMPEQB + vex_l, ret, ret, ret);
        return;
    }

    if (TCG_TARGET_REG_BITS == 32 && vece < MO_64) {
        if (have_avx2) {
            tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTD + vex_l, ret);
        } else {
            tcg_out_vex_modrm_pool(s, OPC_VBROADCASTSS, ret);
        }
        new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
    } else {
        if (type == TCG_TYPE_V64) {
            tcg_out_vex_modrm_pool(s, OPC_MOVQ_VqWq, ret);
        } else if (have_avx2) {
            tcg_out_vex_modrm_pool(s, OPC_VPBROADCASTQ + vex_l, ret);
        } else {
            tcg_out_vex_modrm_pool(s, OPC_MOVDDUP, ret);
        }
        if (TCG_TARGET_REG_BITS == 64) {
            new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4);
        } else {
            new_pool_l2(s, R_386_32, s->code_ptr - 4, 0, arg, arg >> 32);
        }
    }
}

static void tcg_out_movi_vec(TCGContext *s, TCGType type,
                             TCGReg ret, tcg_target_long arg)
{
    if (arg == 0) {
        tcg_out_vex_modrm(s, OPC_PXOR, ret, ret, ret);
        return;
    }
    if (arg == -1) {
        tcg_out_vex_modrm(s, OPC_PCMPEQB, ret, ret, ret);
        return;
    }

    int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW);
    tcg_out_vex_modrm_pool(s, OPC_MOVD_VyEy + rexw, ret);
    if (TCG_TARGET_REG_BITS == 64) {
        new_pool_label(s, arg, R_386_PC32, s->code_ptr - 4, -4);
    } else {
        new_pool_label(s, arg, R_386_32, s->code_ptr - 4, 0);
    }
}

static void tcg_out_movi_int(TCGContext *s, TCGType type,
                             TCGReg ret, tcg_target_long arg)
{
    tcg_target_long diff;

    if (arg == 0) {
        tgen_arithr(s, ARITH_XOR, ret, ret);
        return;
    }
    if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
        tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
        tcg_out32(s, arg);
        return;
    }
    if (arg == (int32_t)arg) {
        tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
        tcg_out32(s, arg);
        return;
    }

    /* Try a 7 byte pc-relative lea before the 10 byte movq.  */
    diff = tcg_pcrel_diff(s, (const void *)arg) - 7;
    if (diff == (int32_t)diff) {
        tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
        tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
        tcg_out32(s, diff);
        return;
    }

    tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
    tcg_out64(s, arg);
}
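
/* The progression above always picks the shortest encoding: 2-3 bytes
   for the xor (zero), 5-6 for movl $imm32, 7 for the sign-extended
   movq or the pc-relative lea, and only then the full 10-byte
   movabsq.  */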

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
#endif
        if (ret < 16) {
            tcg_out_movi_int(s, type, ret, arg);
        } else {
            tcg_out_movi_vec(s, type, ret, arg);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
    tcg_out_modrm(s, OPC_XCHG_EvGv + rexw, r1, r2);
    return true;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    tcg_debug_assert(imm == (int32_t)imm);
    tcg_out_modrm_offset(s, OPC_LEA, rd, rs, imm);
}

static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
{
    if (val == (int8_t)val) {
        tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
        tcg_out8(s, val);
    } else if (val == (int32_t)val) {
        tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
        tcg_out32(s, val);
    } else {
        g_assert_not_reached();
    }
}

static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Given the strength of x86 memory ordering, we only need to care
       about store-load ordering.  Experimentally, "lock orl $0,0(%esp)"
       is faster than "mfence", so don't bother with the SSE insn.  */
    if (a0 & TCG_MO_ST_LD) {
        tcg_out8(s, 0xf0);
        tcg_out_modrm_offset(s, OPC_ARITH_EvIb, ARITH_OR, TCG_REG_ESP, 0);
        tcg_out8(s, 0);
    }
}
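
/* The byte sequence emitted above is f0 83 0c 24 00, i.e.
   "lock orl $0, (%esp)": a no-op locked read-modify-write of the stack
   top, whose serializing effect provides the store-load barrier.  */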

static inline void tcg_out_push(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_pop(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (ret < 16) {
            tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
        } else {
            tcg_out_vex_modrm_offset(s, OPC_MOVD_VyEy, ret, 0, arg1, arg2);
        }
        break;
    case TCG_TYPE_I64:
        if (ret < 16) {
            tcg_out_modrm_offset(s, OPC_MOVL_GvEv | P_REXW, ret, arg1, arg2);
            break;
        }
        /* FALLTHRU */
    case TCG_TYPE_V64:
        /* There is no instruction that can validate 8-byte alignment.  */
        tcg_debug_assert(ret >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVQ_VqWq, ret, 0, arg1, arg2);
        break;
    case TCG_TYPE_V128:
        /*
         * The gvec infrastructure asserts that v128 vector loads
         * and stores use a 16-byte aligned offset.  Validate that the
         * final pointer is aligned by using an insn that will SIGSEGV.
         */
        tcg_debug_assert(ret >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVDQA_VxWx, ret, 0, arg1, arg2);
        break;
    case TCG_TYPE_V256:
        /*
         * The gvec infrastructure only requires 16-byte alignment,
         * so here we must use an unaligned load.
         */
        tcg_debug_assert(ret >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVDQU_VxWx | P_VEXL,
                                 ret, 0, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (arg < 16) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
        } else {
            tcg_out_vex_modrm_offset(s, OPC_MOVD_EyVy, arg, 0, arg1, arg2);
        }
        break;
    case TCG_TYPE_I64:
        if (arg < 16) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_REXW, arg, arg1, arg2);
            break;
        }
        /* FALLTHRU */
    case TCG_TYPE_V64:
        /* There is no instruction that can validate 8-byte alignment.  */
        tcg_debug_assert(arg >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVQ_WqVq, arg, 0, arg1, arg2);
        break;
    case TCG_TYPE_V128:
        /*
         * The gvec infrastructure asserts that v128 vector loads
         * and stores use a 16-byte aligned offset.  Validate that the
         * final pointer is aligned by using an insn that will SIGSEGV.
         *
         * This specific instance is also used by TCG_CALL_RET_BY_VEC,
         * for _WIN64, which must have SSE2 but may not have AVX.
         */
        tcg_debug_assert(arg >= 16);
        if (have_avx1) {
            tcg_out_vex_modrm_offset(s, OPC_MOVDQA_WxVx, arg, 0, arg1, arg2);
        } else {
            tcg_out_modrm_offset(s, OPC_MOVDQA_WxVx, arg, arg1, arg2);
        }
        break;
    case TCG_TYPE_V256:
        /*
         * The gvec infrastructure only requires 16-byte alignment,
         * so here we must use an unaligned store.
         */
        tcg_debug_assert(arg >= 16);
        tcg_out_vex_modrm_offset(s, OPC_MOVDQU_WxVx | P_VEXL,
                                 arg, 0, arg1, arg2);
        break;
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    int rexw = 0;
    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
        if (val != (int32_t)val) {
            return false;
        }
        rexw = P_REXW;
    } else if (type != TCG_TYPE_I32) {
        return false;
    }
    tcg_out_modrm_offset(s, OPC_MOVL_EvIz | rexw, 0, base, ofs);
    tcg_out32(s, val);
    return true;
}

static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
{
    /* Propagate an opcode prefix, such as P_DATA16.  */
    int ext = subopc & ~0x7;
    subopc &= 0x7;

    if (count == 1) {
        tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
    } else {
        tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
        tcg_out8(s, count);
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
}

static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
    tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
{
    /* movzbl */
    tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
    /* movsbl */
    tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg dest, TCGReg src)
{
    /* movzwl */
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
    /* movsw[lq] */
    tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    /* 32-bit mov zero extends.  */
    tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_ext32s(s, dest, src);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_ext32u(s, dest, src);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_ext32u(s, dest, src);
}

static inline void tcg_out_bswap64(TCGContext *s, int reg)
{
    tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
}

static void tgen_arithi(TCGContext *s, int c, int r0,
                        tcg_target_long val, int cf)
{
    int rexw = 0;

    if (TCG_TARGET_REG_BITS == 64) {
        rexw = c & -8;
        c &= 7;
    }

    /* ??? While INC is 2 bytes shorter than ADDL $1, it also induces
       partial-flags-update stalls on Pentium 4 and is not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        int is_inc = (c == ARITH_ADD) ^ (val < 0);
        if (TCG_TARGET_REG_BITS == 64) {
            /* The single-byte increment encodings are re-tasked as the
               REX prefixes.  Use the MODRM encoding.  */
            tcg_out_modrm(s, OPC_GRP5 + rexw,
                          (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
        } else {
            tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
        }
        return;
    }

    if (c == ARITH_AND) {
        if (TCG_TARGET_REG_BITS == 64) {
            if (val == 0xffffffffu) {
                tcg_out_ext32u(s, r0, r0);
                return;
            }
            if (val == (uint32_t)val) {
                /* AND with no high bits set can use a 32-bit operation.  */
                rexw = 0;
            }
        }
        if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
            tcg_out_ext8u(s, r0, r0);
            return;
        }
        if (val == 0xffffu) {
            tcg_out_ext16u(s, r0, r0);
            return;
        }
    }

    if (val == (int8_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
        tcg_out8(s, val);
        return;
    }
    if (rexw == 0 || val == (int32_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
        tcg_out32(s, val);
        return;
    }

    g_assert_not_reached();
}
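
/* For example: "andl $0xff, %ebx" is narrowed above to the shorter
   movzbl %bl, %ebx, and a 64-bit AND with 0xffffffff becomes a plain
   32-bit register move, relying on the implicit zero-extension of
   32-bit operations.  */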

static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
    }
}

/* Set SMALL to force a short forward branch.  */
static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, bool small)
{
    int32_t val, val1;

    if (l->has_value) {
        val = tcg_pcrel_diff(s, l->u.value_ptr);
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_short);
            } else {
                tcg_out8(s, OPC_JCC_short + opc);
            }
            tcg_out8(s, val1);
        } else {
            tcg_debug_assert(!small);
            if (opc == -1) {
                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);
            } else {
                tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
                tcg_out32(s, val - 6);
            }
        }
    } else if (small) {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_short);
        } else {
            tcg_out8(s, OPC_JCC_short + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, l, -1);
        s->code_ptr += 1;
    } else {
        if (opc == -1) {
            tcg_out8(s, OPC_JMP_long);
        } else {
            tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, l, -4);
        s->code_ptr += 4;
    }
}
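
/* The adjustments above account for instruction length, since the
   branch displacement is relative to the end of the insn: 2 bytes for
   the short forms (opcode + disp8), 5 for jmp rel32 (e9 + disp32),
   and 6 for the long jcc (0f 8x + disp32).  */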

static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int rexw)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
        }
    } else {
        tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
    }
}

static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             TCGLabel *label, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             TCGLabel *label, int small)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
}
#else
/* XXX: we implement it at the target level to avoid having to
   handle cross-basic-block temporaries.  */
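/* For instance, a 64-bit signed LT below becomes: branch if the high
   words compare LT; otherwise skip over the low-word test via JNE when
   the high words differ; when they are equal, branch if the low words
   compare LTU.  */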
1476static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
1477                            const int *const_args, int small)
1478{
1479    TCGLabel *label_next = gen_new_label();
1480    TCGLabel *label_this = arg_label(args[5]);
1481
1482    switch(args[4]) {
1483    case TCG_COND_EQ:
1484        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
1485                         label_next, 1);
1486        tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
1487                         label_this, small);
1488        break;
1489    case TCG_COND_NE:
1490        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
1491                         label_this, small);
1492        tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
1493                         label_this, small);
1494        break;
1495    case TCG_COND_LT:
1496        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
1497                         label_this, small);
1498        tcg_out_jxx(s, JCC_JNE, label_next, 1);
1499        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
1500                         label_this, small);
1501        break;
1502    case TCG_COND_LE:
1503        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
1504                         label_this, small);
1505        tcg_out_jxx(s, JCC_JNE, label_next, 1);
1506        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
1507                         label_this, small);
1508        break;
1509    case TCG_COND_GT:
1510        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
1511                         label_this, small);
1512        tcg_out_jxx(s, JCC_JNE, label_next, 1);
1513        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
1514                         label_this, small);
1515        break;
1516    case TCG_COND_GE:
1517        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
1518                         label_this, small);
1519        tcg_out_jxx(s, JCC_JNE, label_next, 1);
1520        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
1521                         label_this, small);
1522        break;
1523    case TCG_COND_LTU:
1524        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
1525                         label_this, small);
1526        tcg_out_jxx(s, JCC_JNE, label_next, 1);
1527        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
1528                         label_this, small);
1529        break;
1530    case TCG_COND_LEU:
1531        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
1532                         label_this, small);
1533        tcg_out_jxx(s, JCC_JNE, label_next, 1);
1534        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
1535                         label_this, small);
1536        break;
1537    case TCG_COND_GTU:
1538        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
1539                         label_this, small);
1540        tcg_out_jxx(s, JCC_JNE, label_next, 1);
1541        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
1542                         label_this, small);
1543        break;
1544    case TCG_COND_GEU:
1545        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
1546                         label_this, small);
1547        tcg_out_jxx(s, JCC_JNE, label_next, 1);
1548        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
1549                         label_this, small);
1550        break;
1551    default:
1552        g_assert_not_reached();
1553    }
1554    tcg_out_label(s, label_next);
1555}
1556#endif
1557
1558static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
1559                              TCGArg arg1, TCGArg arg2, int const_arg2)
1560{
1561    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
1562    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
1563    tcg_out_ext8u(s, dest, dest);
1564}
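
/*
 * Illustrative output, added for clarity (example registers; the actual
 * choice is up to the register allocator): setcond32(LT, dest=%eax,
 * arg1=%ecx, arg2=$5) produces roughly
 *     cmpl   $5, %ecx
 *     setl   %al
 *     movzbl %al, %eax
 * The 'q' operand constraint keeps dest byte-addressable on 32-bit hosts.
 */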
1565
1566#if TCG_TARGET_REG_BITS == 64
1567static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
1568                              TCGArg arg1, TCGArg arg2, int const_arg2)
1569{
1570    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
1571    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
1572    tcg_out_ext8u(s, dest, dest);
1573}
1574#else
1575static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1576                             const int *const_args)
1577{
1578    TCGArg new_args[6];
1579    TCGLabel *label_true, *label_over;
1580
1581    memcpy(new_args, args+1, 5*sizeof(TCGArg));
1582
1583    if (args[0] == args[1] || args[0] == args[2]
1584        || (!const_args[3] && args[0] == args[3])
1585        || (!const_args[4] && args[0] == args[4])) {
1586        /* When the destination overlaps with one of the argument
1587           registers, don't do anything tricky.  */
1588        label_true = gen_new_label();
1589        label_over = gen_new_label();
1590
1591        new_args[5] = label_arg(label_true);
1592        tcg_out_brcond2(s, new_args, const_args+1, 1);
1593
1594        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
1595        tcg_out_jxx(s, JCC_JMP, label_over, 1);
1596        tcg_out_label(s, label_true);
1597
1598        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
1599        tcg_out_label(s, label_over);
1600    } else {
1601        /* When the destination does not overlap one of the arguments,
1602           clear the destination first, jump if cond false, and emit an
1603           increment in the true case.  This results in smaller code.  */
1604
1605        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
1606
1607        label_over = gen_new_label();
1608        new_args[4] = tcg_invert_cond(new_args[4]);
1609        new_args[5] = label_arg(label_over);
1610        tcg_out_brcond2(s, new_args, const_args+1, 1);
1611
1612        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
1613        tcg_out_label(s, label_over);
1614    }
1615}
1616#endif
1617
1618static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
1619                         TCGReg dest, TCGReg v1)
1620{
1621    if (have_cmov) {
1622        tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | rexw, dest, v1);
1623    } else {
1624        TCGLabel *over = gen_new_label();
1625        tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
1626        tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
1627        tcg_out_label(s, over);
1628    }
1629}
1630
1631static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGReg dest,
1632                              TCGReg c1, TCGArg c2, int const_c2,
1633                              TCGReg v1)
1634{
1635    tcg_out_cmp(s, c1, c2, const_c2, 0);
1636    tcg_out_cmov(s, cond, 0, dest, v1);
1637}
1638
1639#if TCG_TARGET_REG_BITS == 64
1640static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGReg dest,
1641                              TCGReg c1, TCGArg c2, int const_c2,
1642                              TCGReg v1)
1643{
1644    tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
1645    tcg_out_cmov(s, cond, P_REXW, dest, v1);
1646}
1647#endif
1648
1649static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
1650                        TCGArg arg2, bool const_a2)
1651{
1652    if (have_bmi1) {
1653        tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1);
1654        if (const_a2) {
1655            tcg_debug_assert(arg2 == (rexw ? 64 : 32));
1656        } else {
1657            tcg_debug_assert(dest != arg2);
1658            tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
1659        }
1660    } else {
1661        tcg_debug_assert(dest != arg2);
1662        tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
1663        tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
1664    }
1665}
1666
1667static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
1668                        TCGArg arg2, bool const_a2)
1669{
1670    if (have_lzcnt) {
1671        tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1);
1672        if (const_a2) {
1673            tcg_debug_assert(arg2 == (rexw ? 64 : 32));
1674        } else {
1675            tcg_debug_assert(dest != arg2);
1676            tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
1677        }
1678    } else {
1679        tcg_debug_assert(!const_a2);
1680        tcg_debug_assert(dest != arg1);
1681        tcg_debug_assert(dest != arg2);
1682
1683        /* Recall that the output of BSR is the index not the count.  */
1684        tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1);
1685        tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);
1686
1687        /* Since we have destroyed the flags from BSR, we have to re-test.  */
1688        tcg_out_cmp(s, arg1, 0, 1, rexw);
1689        tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
1690    }
1691}
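
/*
 * Worked example for the BSR fallback above, added for clarity: for a
 * 32-bit input of 0x40000000 the highest set bit is bit 30, so BSR
 * yields 30 and 30 ^ 31 = 1, the leading-zero count.  The XOR works
 * because clz(x) = 31 - bsr(x), and 31 - n == 31 ^ n for 0 <= n <= 31.
 */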
1692
1693static void tcg_out_branch(TCGContext *s, int call, const tcg_insn_unit *dest)
1694{
1695    intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
1696
1697    if (disp == (int32_t)disp) {
1698        tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
1699        tcg_out32(s, disp);
1700    } else {
1701        /* rip-relative addressing into the constant pool.
1702           This is 6 + 8 = 14 bytes, as compared to using an
1703           immediate load 10 + 6 = 16 bytes, plus we may
1704           be able to re-use the pool constant for more calls.  */
1705        tcg_out_opc(s, OPC_GRP5, 0, 0, 0);
1706        tcg_out8(s, (call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev) << 3 | 5);
1707        new_pool_label(s, (uintptr_t)dest, R_386_PC32, s->code_ptr, -4);
1708        tcg_out32(s, 0);
1709    }
1710}
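
/*
 * Encoding note, added for clarity: the "- 5" above is the length of the
 * direct forms, E8 rel32 (call) and E9 rel32 (jmp), both 5 bytes, since
 * rel32 is measured from the end of the instruction.  The fallback emits
 * FF /2 (call) or FF /4 (jmp) with mod=00 rm=101, i.e. a rip-relative
 * disp32 (6 bytes total) pointing at an 8-byte pool entry that holds the
 * absolute destination address.
 */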
1711
1712static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
1713                         const TCGHelperInfo *info)
1714{
1715    tcg_out_branch(s, 1, dest);
1716
1717#ifndef _WIN32
1718    if (TCG_TARGET_REG_BITS == 32 && info->out_kind == TCG_CALL_RET_BY_REF) {
1719        /*
1720         * The sysv i386 abi for struct return places a reference as the
1721         * first argument of the stack, and pops that argument with the
1722         * return statement.  Since we want to retain the aligned stack
1723         * pointer for the callee, we do not want to actually push that
1724         * argument before the call but rely on the normal store to the
1725         * stack slot.  But we do need to compensate for the pop in order
1726         * to reset our correct stack pointer value.
1727         * Pushing a garbage value back onto the stack is quickest.
1728         */
1729        tcg_out_push(s, TCG_REG_EAX);
1730    }
1731#endif
1732}
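
/*
 * Stack effect of the fixup above, added for clarity: the sysv i386
 * callee returns with "ret $4", popping the hidden struct-return pointer
 * and leaving %esp 4 bytes above our expected value; pushing a dead
 * register value simply moves %esp back down by 4.
 */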
1733
1734static void tcg_out_jmp(TCGContext *s, const tcg_insn_unit *dest)
1735{
1736    tcg_out_branch(s, 0, dest);
1737}
1738
1739static void tcg_out_nopn(TCGContext *s, int n)
1740{
1741    int i;
1742    /* Emit 1 or 2 operand size prefixes for the standard one byte nop,
1743     * "xchg %eax,%eax", forming "xchg %ax,%ax". All cores accept the
1744     * duplicate prefix, and all of the interesting recent cores can
1745     * decode and discard the duplicates in a single cycle.
1746     */
1747    tcg_debug_assert(n >= 1);
1748    for (i = 1; i < n; ++i) {
1749        tcg_out8(s, 0x66);
1750    }
1751    tcg_out8(s, 0x90);
1752}
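
/*
 * Resulting byte sequences, added for clarity:
 *   n=1: 90;  n=2: 66 90;  n=3: 66 66 90;  ...
 * i.e. n-1 operand-size prefixes in front of a single one-byte nop.
 */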
1753
1754#if defined(CONFIG_SOFTMMU)
1755/* helper signature: helper_ret_ld_mmu(CPUArchState *env, target_ulong addr,
1756 *                                     MemOpIdx oi, uintptr_t ra)
1757 */
1758static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
1759    [MO_UB]   = helper_ret_ldub_mmu,
1760    [MO_LEUW] = helper_le_lduw_mmu,
1761    [MO_LEUL] = helper_le_ldul_mmu,
1762    [MO_LEUQ] = helper_le_ldq_mmu,
1763    [MO_BEUW] = helper_be_lduw_mmu,
1764    [MO_BEUL] = helper_be_ldul_mmu,
1765    [MO_BEUQ] = helper_be_ldq_mmu,
1766};
1767
1768/* helper signature: helper_ret_st_mmu(CPUArchState *env, target_ulong addr,
1769 *                                     uintxx_t val, MemOpIdx oi, uintptr_t ra)
1770 */
1771static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
1772    [MO_UB]   = helper_ret_stb_mmu,
1773    [MO_LEUW] = helper_le_stw_mmu,
1774    [MO_LEUL] = helper_le_stl_mmu,
1775    [MO_LEUQ] = helper_le_stq_mmu,
1776    [MO_BEUW] = helper_be_stw_mmu,
1777    [MO_BEUL] = helper_be_stl_mmu,
1778    [MO_BEUQ] = helper_be_stq_mmu,
1779};
1780
1781/* Perform the TLB load and compare.
1782
1783   Inputs:
1784   ADDRLO and ADDRHI contain the low and high part of the address.
1785
1786   MEM_INDEX and OPC are the memory context and memop for the access.
1787
1788   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1789   This should be offsetof(CPUTLBEntry, addr_read) or addr_write.
1790
1791   Outputs:
1792   LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
1793   positions of the displacements of forward jumps to the TLB miss case.
1794
1795   The second argument register is loaded with the low part of the address.
1796   In the TLB hit case, it has been adjusted as indicated by the TLB
1797   and so is a host address.  In the TLB miss case, it continues to
1798   hold a guest address.
1799
1800   The first argument register is clobbered.  */
1801
1802static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
1803                                    int mem_index, MemOp opc,
1804                                    tcg_insn_unit **label_ptr, int which)
1805{
1806    const TCGReg r0 = TCG_REG_L0;
1807    const TCGReg r1 = TCG_REG_L1;
1808    TCGType ttype = TCG_TYPE_I32;
1809    TCGType tlbtype = TCG_TYPE_I32;
1810    int trexw = 0, hrexw = 0, tlbrexw = 0;
1811    unsigned a_bits = get_alignment_bits(opc);
1812    unsigned s_bits = opc & MO_SIZE;
1813    unsigned a_mask = (1 << a_bits) - 1;
1814    unsigned s_mask = (1 << s_bits) - 1;
1815    target_ulong tlb_mask;
1816
1817    if (TCG_TARGET_REG_BITS == 64) {
1818        if (TARGET_LONG_BITS == 64) {
1819            ttype = TCG_TYPE_I64;
1820            trexw = P_REXW;
1821        }
1822        if (TCG_TYPE_PTR == TCG_TYPE_I64) {
1823            hrexw = P_REXW;
1824            if (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32) {
1825                tlbtype = TCG_TYPE_I64;
1826                tlbrexw = P_REXW;
1827            }
1828        }
1829    }
1830
1831    tcg_out_mov(s, tlbtype, r0, addrlo);
1832    tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
1833                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1834
1835    tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0,
1836                         TLB_MASK_TABLE_OFS(mem_index) +
1837                         offsetof(CPUTLBDescFast, mask));
1838
1839    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0,
1840                         TLB_MASK_TABLE_OFS(mem_index) +
1841                         offsetof(CPUTLBDescFast, table));
1842
1843    /* If the required alignment is at least as large as the access, simply
1844       copy the address and mask.  For lesser alignments, check that we don't
1845       cross pages for the complete access.  */
1846    if (a_bits >= s_bits) {
1847        tcg_out_mov(s, ttype, r1, addrlo);
1848    } else {
1849        tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask);
1850    }
1851    tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
1852    tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
1853
1854    /* cmp 0(r0), r1 */
1855    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, which);
1856
1857    /* Prepare for both the fast path add of the tlb addend, and the slow
1858       path function argument setup.  */
1859    tcg_out_mov(s, ttype, r1, addrlo);
1860
1861    /* jne slow_path */
1862    tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1863    label_ptr[0] = s->code_ptr;
1864    s->code_ptr += 4;
1865
1866    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1867        /* cmp 4(r0), addrhi */
1868        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, which + 4);
1869
1870        /* jne slow_path */
1871        tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
1872        label_ptr[1] = s->code_ptr;
1873        s->code_ptr += 4;
1874    }
1875
1876    /* TLB Hit.  */
1877
1878    /* add addend(r0), r1 */
1879    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
1880                         offsetof(CPUTLBEntry, addend));
1881}
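
/*
 * Illustrative fast-path sequence, added for clarity, for a 64-bit guest
 * on a 64-bit host (r0/r1 stand for TCG_REG_L0/L1):
 *     mov  addrlo, r0
 *     shr  $(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS), r0
 *     and  mask(env), r0
 *     add  table(env), r0             ; r0 = &CPUTLBEntry
 *     lea  s_mask-a_mask(addrlo), r1  ; or plain mov if a_bits >= s_bits
 *     and  $(TARGET_PAGE_MASK | a_mask), r1
 *     cmp  which(r0), r1
 *     mov  addrlo, r1                 ; mov does not touch the flags
 *     jne  slow_path
 *     add  addend(r0), r1             ; r1 = host address
 */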
1882
1883/*
1884 * Record the context of a call to the out-of-line helper code for the slow
1885 * path of a load or store, so that we can later generate the correct helper code.
1886 */
1887static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
1888                                MemOpIdx oi,
1889                                TCGReg datalo, TCGReg datahi,
1890                                TCGReg addrlo, TCGReg addrhi,
1891                                tcg_insn_unit *raddr,
1892                                tcg_insn_unit **label_ptr)
1893{
1894    TCGLabelQemuLdst *label = new_ldst_label(s);
1895
1896    label->is_ld = is_ld;
1897    label->oi = oi;
1898    label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
1899    label->datalo_reg = datalo;
1900    label->datahi_reg = datahi;
1901    label->addrlo_reg = addrlo;
1902    label->addrhi_reg = addrhi;
1903    label->raddr = tcg_splitwx_to_rx(raddr);
1904    label->label_ptr[0] = label_ptr[0];
1905    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1906        label->label_ptr[1] = label_ptr[1];
1907    }
1908}
1909
1910/*
1911 * Generate code for the slow path of a load at the end of the block
1912 */
1913static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1914{
1915    MemOpIdx oi = l->oi;
1916    MemOp opc = get_memop(oi);
1917    TCGReg data_reg;
1918    tcg_insn_unit **label_ptr = &l->label_ptr[0];
1919
1920    /* resolve label address */
1921    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
1922    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1923        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
1924    }
1925
1926    if (TCG_TARGET_REG_BITS == 32) {
1927        int ofs = 0;
1928
1929        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
1930        ofs += 4;
1931
1932        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
1933        ofs += 4;
1934
1935        if (TARGET_LONG_BITS == 64) {
1936            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
1937            ofs += 4;
1938        }
1939
1940        tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
1941        ofs += 4;
1942
1943        tcg_out_sti(s, TCG_TYPE_PTR, (uintptr_t)l->raddr, TCG_REG_ESP, ofs);
1944    } else {
1945        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
1946        /* The second argument is already loaded with addrlo.  */
1947        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
1948        tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
1949                     (uintptr_t)l->raddr);
1950    }
1951
1952    tcg_out_branch(s, 1, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1953
1954    data_reg = l->datalo_reg;
1955    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
1956        if (data_reg == TCG_REG_EDX) {
1957            /* xchg %edx, %eax */
1958            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
1959            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
1960        } else {
1961            tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
1962            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
1963        }
1964    } else {
1965        tcg_out_movext(s, l->type, data_reg,
1966                       TCG_TYPE_REG, opc & MO_SSIZE, TCG_REG_EAX);
1967    }
1968
1969    /* Jump to the code following the qemu_ld IR op.  */
1970    tcg_out_jmp(s, l->raddr);
1971    return true;
1972}
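
/*
 * Note on the xchg above, added for clarity: a 64-bit value comes back
 * from the helper in EDX:EAX.  If datalo is EDX, copying EAX into it
 * first would destroy the high half still sitting in EDX; the single
 * xchg swaps the halves without needing a scratch register.
 */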
1973
1974/*
1975 * Generate code for the slow path of a store at the end of the block
1976 */
1977static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
1978{
1979    MemOpIdx oi = l->oi;
1980    MemOp opc = get_memop(oi);
1981    MemOp s_bits = opc & MO_SIZE;
1982    tcg_insn_unit **label_ptr = &l->label_ptr[0];
1983    TCGReg retaddr;
1984
1985    /* resolve label address */
1986    tcg_patch32(label_ptr[0], s->code_ptr - label_ptr[0] - 4);
1987    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1988        tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
1989    }
1990
1991    if (TCG_TARGET_REG_BITS == 32) {
1992        int ofs = 0;
1993
1994        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
1995        ofs += 4;
1996
1997        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
1998        ofs += 4;
1999
2000        if (TARGET_LONG_BITS == 64) {
2001            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
2002            ofs += 4;
2003        }
2004
2005        tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
2006        ofs += 4;
2007
2008        if (s_bits == MO_64) {
2009            tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
2010            ofs += 4;
2011        }
2012
2013        tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
2014        ofs += 4;
2015
2016        retaddr = TCG_REG_EAX;
2017        tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
2018        tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs);
2019    } else {
2020        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
2021        /* The second argument is already loaded with addrlo.  */
2022        tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
2023                    tcg_target_call_iarg_regs[2], l->datalo_reg);
2024        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi);
2025
2026        if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
2027            retaddr = tcg_target_call_iarg_regs[4];
2028            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
2029        } else {
2030            retaddr = TCG_REG_RAX;
2031            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
2032            tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP,
2033                       TCG_TARGET_CALL_STACK_OFFSET);
2034        }
2035    }
2036
2037    /* "Tail call" to the helper, with the return address back inline.  */
2038    tcg_out_push(s, retaddr);
2039    tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
2040    return true;
2041}
2042#else
2043
2044static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
2045                                   TCGReg addrhi, unsigned a_bits)
2046{
2047    unsigned a_mask = (1 << a_bits) - 1;
2048    TCGLabelQemuLdst *label;
2049
2050    /*
2051     * We are expecting a_bits to max out at 7, so we can usually use testb.
2052     * For i686, we have to use testl for %esi/%edi.
2053     */
2054    if (a_mask <= 0xff && (TCG_TARGET_REG_BITS == 64 || addrlo < 4)) {
2055        tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, addrlo);
2056        tcg_out8(s, a_mask);
2057    } else {
2058        tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, addrlo);
2059        tcg_out32(s, a_mask);
2060    }
2061
2062    /* jne slow_path */
2063    tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
2064
2065    label = new_ldst_label(s);
2066    label->is_ld = is_ld;
2067    label->addrlo_reg = addrlo;
2068    label->addrhi_reg = addrhi;
2069    label->raddr = tcg_splitwx_to_rx(s->code_ptr + 4);
2070    label->label_ptr[0] = s->code_ptr;
2071
2072    s->code_ptr += 4;
2073}
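
/*
 * Example, added for clarity: a 4-byte access that must be 4-byte
 * aligned has a_bits = 2 and a_mask = 3, so the code above emits roughly
 *     testb $3, %addr_reg     ; or testl for %esi/%edi on i686
 *     jne   slow_path
 */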
2074
2075static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
2076{
2077    /* resolve label address */
2078    tcg_patch32(l->label_ptr[0], s->code_ptr - l->label_ptr[0] - 4);
2079
2080    if (TCG_TARGET_REG_BITS == 32) {
2081        int ofs = 0;
2082
2083        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
2084        ofs += 4;
2085
2086        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
2087        ofs += 4;
2088        if (TARGET_LONG_BITS == 64) {
2089            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
2090            ofs += 4;
2091        }
2092
2093        tcg_out_pushi(s, (uintptr_t)l->raddr);
2094    } else {
2095        tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
2096                    l->addrlo_reg);
2097        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
2098
2099        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, (uintptr_t)l->raddr);
2100        tcg_out_push(s, TCG_REG_RAX);
2101    }
2102
2103    /* "Tail call" to the helper, with the return address back inline. */
2104    tcg_out_jmp(s, (const void *)(l->is_ld ? helper_unaligned_ld
2105                                  : helper_unaligned_st));
2106    return true;
2107}
2108
2109static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
2110{
2111    return tcg_out_fail_alignment(s, l);
2112}
2113
2114static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
2115{
2116    return tcg_out_fail_alignment(s, l);
2117}
2118
2119#if TCG_TARGET_REG_BITS == 32
2120# define x86_guest_base_seg     0
2121# define x86_guest_base_index   -1
2122# define x86_guest_base_offset  guest_base
2123#else
2124static int x86_guest_base_seg;
2125static int x86_guest_base_index = -1;
2126static int32_t x86_guest_base_offset;
2127# if defined(__x86_64__) && defined(__linux__)
2128#  include <asm/prctl.h>
2129#  include <sys/prctl.h>
2130int arch_prctl(int code, unsigned long addr);
2131static inline int setup_guest_base_seg(void)
2132{
2133    if (arch_prctl(ARCH_SET_GS, guest_base) == 0) {
2134        return P_GS;
2135    }
2136    return 0;
2137}
2138# elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__)
2139#  include <machine/sysarch.h>
2140static inline int setup_guest_base_seg(void)
2141{
2142    if (sysarch(AMD64_SET_GSBASE, &guest_base) == 0) {
2143        return P_GS;
2144    }
2145    return 0;
2146}
2147# else
2148static inline int setup_guest_base_seg(void)
2149{
2150    return 0;
2151}
2152# endif
2153#endif
2154#endif /* SOFTMMU */
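
/*
 * Added note: the prologue (not shown in this excerpt) chooses among the
 * three guest_base mechanisms above: a base that fits in int32 goes into
 * x86_guest_base_offset; otherwise a segment base installed via
 * setup_guest_base_seg() supplies the P_GS instruction prefix through
 * x86_guest_base_seg; failing both, a dedicated register is recorded in
 * x86_guest_base_index.
 */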
2155
2156static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
2157                                   TCGReg base, int index, intptr_t ofs,
2158                                   int seg, bool is64, MemOp memop)
2159{
2160    TCGType type = is64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
2161    bool use_movbe = false;
2162    int rexw = is64 * P_REXW;
2163    int movop = OPC_MOVL_GvEv;
2164
2165    /* Do big-endian loads with movbe.  */
2166    if (memop & MO_BSWAP) {
2167        tcg_debug_assert(have_movbe);
2168        use_movbe = true;
2169        movop = OPC_MOVBE_GyMy;
2170    }
2171
2172    switch (memop & MO_SSIZE) {
2173    case MO_UB:
2174        tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo,
2175                                 base, index, 0, ofs);
2176        break;
2177    case MO_SB:
2178        tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + seg, datalo,
2179                                 base, index, 0, ofs);
2180        break;
2181    case MO_UW:
2182        if (use_movbe) {
2183            /* There is no extending movbe; only the low 16 bits are modified.  */
2184            if (datalo != base && datalo != index) {
2185                /* XOR breaks dependency chains.  */
2186                tgen_arithr(s, ARITH_XOR, datalo, datalo);
2187                tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
2188                                         datalo, base, index, 0, ofs);
2189            } else {
2190                tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
2191                                         datalo, base, index, 0, ofs);
2192                tcg_out_ext16u(s, datalo, datalo);
2193            }
2194        } else {
2195            tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
2196                                     base, index, 0, ofs);
2197        }
2198        break;
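    /*
     * Sketch of the two byte-swapped MO_UW sequences above, added for
     * clarity: with a free destination,
     *     xor    %datalo, %datalo
     *     movbe  (mem), %datalo16   ; 16-bit movbe, upper bits already zero
     * versus, when datalo doubles as base or index,
     *     movbe  (mem), %datalo16
     *     movzwl %datalo16, %datalo
     */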
2199    case MO_SW:
2200        if (use_movbe) {
2201            tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
2202                                     datalo, base, index, 0, ofs);
2203            tcg_out_ext16s(s, type, datalo, datalo);
2204        } else {
2205            tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg,
2206                                     datalo, base, index, 0, ofs);
2207        }
2208        break;
2209    case MO_UL:
2210        tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
2211        break;
2212#if TCG_TARGET_REG_BITS == 64
2213    case MO_SL:
2214        if (use_movbe) {
2215            tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + seg, datalo,
2216                                     base, index, 0, ofs);
2217            tcg_out_ext32s(s, datalo, datalo);
2218        } else {
2219            tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo,
2220                                     base, index, 0, ofs);
2221        }
2222        break;
2223#endif
2224    case MO_UQ:
2225        if (TCG_TARGET_REG_BITS == 64) {
2226            tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
2227                                     base, index, 0, ofs);
2228        } else {
2229            if (use_movbe) {
2230                TCGReg t = datalo;
2231                datalo = datahi;
2232                datahi = t;
2233            }
2234            if (base != datalo) {
2235                tcg_out_modrm_sib_offset(s, movop + seg, datalo,
2236                                         base, index, 0, ofs);
2237                tcg_out_modrm_sib_offset(s, movop + seg, datahi,
2238                                         base, index, 0, ofs + 4);
2239            } else {
2240                tcg_out_modrm_sib_offset(s, movop + seg, datahi,
2241                                         base, index, 0, ofs + 4);
2242                tcg_out_modrm_sib_offset(s, movop + seg, datalo,
2243                                         base, index, 0, ofs);
2244            }
2245        }
2246        break;
2247    default:
2248        g_assert_not_reached();
2249    }
2250}
2251
2252/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
2253   EAX.  That would be useful once fixed-register globals are less
2254   common. */
2255static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
2256{
2257    TCGReg datalo, datahi, addrlo;
2258    TCGReg addrhi __attribute__((unused));
2259    MemOpIdx oi;
2260    MemOp opc;
2261#if defined(CONFIG_SOFTMMU)
2262    int mem_index;
2263    tcg_insn_unit *label_ptr[2];
2264#else
2265    unsigned a_bits;
2266#endif
2267
2268    datalo = *args++;
2269    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
2270    addrlo = *args++;
2271    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
2272    oi = *args++;
2273    opc = get_memop(oi);
2274
2275#if defined(CONFIG_SOFTMMU)
2276    mem_index = get_mmuidx(oi);
2277
2278    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
2279                     label_ptr, offsetof(CPUTLBEntry, addr_read));
2280
2281    /* TLB Hit.  */
2282    tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc);
2283
2284    /* Record the current context of a load into the ldst label */
2285    add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
2286                        s->code_ptr, label_ptr);
2287#else
2288    a_bits = get_alignment_bits(opc);
2289    if (a_bits) {
2290        tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
2291    }
2292
2293    tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
2294                           x86_guest_base_offset, x86_guest_base_seg,
2295                           is64, opc);
2296#endif
2297}
2298
2299static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
2300                                   TCGReg base, int index, intptr_t ofs,
2301                                   int seg, MemOp memop)
2302{
2303    bool use_movbe = false;
2304    int movop = OPC_MOVL_EvGv;
2305
2306    /*
2307     * Do big-endian stores with movbe or softmmu.
2308     * User-only without movbe will have its swapping done generically.
2309     */
2310    if (memop & MO_BSWAP) {
2311        tcg_debug_assert(have_movbe);
2312        use_movbe = true;
2313        movop = OPC_MOVBE_MyGy;
2314    }
2315
2316    switch (memop & MO_SIZE) {
2317    case MO_8:
2318        /* This is handled with constraints on INDEX_op_qemu_st8_i32. */
2319        tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4);
2320        tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
2321                                 datalo, base, index, 0, ofs);
2322        break;
2323    case MO_16:
2324        tcg_out_modrm_sib_offset(s, movop + P_DATA16 + seg, datalo,
2325                                 base, index, 0, ofs);
2326        break;
2327    case MO_32:
2328        tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
2329        break;
2330    case MO_64:
2331        if (TCG_TARGET_REG_BITS == 64) {
2332            tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
2333                                     base, index, 0, ofs);
2334        } else {
2335            if (use_movbe) {
2336                TCGReg t = datalo;
2337                datalo = datahi;
2338                datahi = t;
2339            }
2340            tcg_out_modrm_sib_offset(s, movop + seg, datalo,
2341                                     base, index, 0, ofs);
2342            tcg_out_modrm_sib_offset(s, movop + seg, datahi,
2343                                     base, index, 0, ofs + 4);
2344        }
2345        break;
2346    default:
2347        g_assert_not_reached();
2348    }
2349}
2350
2351static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
2352{
2353    TCGReg datalo, datahi, addrlo;
2354    TCGReg addrhi __attribute__((unused));
2355    MemOpIdx oi;
2356    MemOp opc;
2357#if defined(CONFIG_SOFTMMU)
2358    int mem_index;
2359    tcg_insn_unit *label_ptr[2];
2360#else
2361    unsigned a_bits;
2362#endif
2363
2364    datalo = *args++;
2365    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
2366    addrlo = *args++;
2367    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
2368    oi = *args++;
2369    opc = get_memop(oi);
2370
2371#if defined(CONFIG_SOFTMMU)
2372    mem_index = get_mmuidx(oi);
2373
2374    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
2375                     label_ptr, offsetof(CPUTLBEntry, addr_write));
2376
2377    /* TLB Hit.  */
2378    tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);
2379
2380    /* Record the current context of a store into ldst label */
2381    add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi,
2382                        s->code_ptr, label_ptr);
2383#else
2384    a_bits = get_alignment_bits(opc);
2385    if (a_bits) {
2386        tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
2387    }
2388
2389    tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
2390                           x86_guest_base_offset, x86_guest_base_seg, opc);
2391#endif
2392}
2393
2394static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
2395{
2396    /* Reuse the zeroing that exists for goto_ptr.  */
2397    if (a0 == 0) {
2398        tcg_out_jmp(s, tcg_code_gen_epilogue);
2399    } else {
2400        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
2401        tcg_out_jmp(s, tb_ret_addr);
2402    }
2403}
2404
2405static void tcg_out_goto_tb(TCGContext *s, int which)
2406{
2407    /*
2408     * Jump displacement must be aligned for atomic patching;
2409     * see if we need to add extra nops before jump
2410     */
2411    int gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
2412    if (gap != 1) {
2413        tcg_out_nopn(s, gap - 1);
2414    }
2415    tcg_out8(s, OPC_JMP_long); /* jmp im */
2416    set_jmp_insn_offset(s, which);
2417    tcg_out32(s, 0);
2418    set_jmp_reset_offset(s, which);
2419}
2420
2421void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
2422                              uintptr_t jmp_rx, uintptr_t jmp_rw)
2423{
2424    /* patch the branch destination */
2425    uintptr_t addr = tb->jmp_target_addr[n];
2426    qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
2427    /* no need to flush icache explicitly */
2428}
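
/*
 * Added rationale: tcg_out_goto_tb pads with nops so that code_ptr + 1,
 * the start of the rel32 operand of the E9 jmp, is 4-byte aligned.  The
 * qatomic_set() above can then rewrite the displacement with a single
 * naturally aligned 32-bit store, which x86 performs atomically even
 * while other threads may be executing the jump.
 */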
2429
2430static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2431                              const TCGArg args[TCG_MAX_OP_ARGS],
2432                              const int const_args[TCG_MAX_OP_ARGS])
2433{
2434    TCGArg a0, a1, a2;
2435    int c, const_a2, vexop, rexw = 0;
2436
2437#if TCG_TARGET_REG_BITS == 64
2438# define OP_32_64(x) \
2439        case glue(glue(INDEX_op_, x), _i64): \
2440            rexw = P_REXW; /* FALLTHRU */    \
2441        case glue(glue(INDEX_op_, x), _i32)
2442#else
2443# define OP_32_64(x) \
2444        case glue(glue(INDEX_op_, x), _i32)
2445#endif
2446
2447    /* Hoist the loads of the most common arguments.  */
2448    a0 = args[0];
2449    a1 = args[1];
2450    a2 = args[2];
2451    const_a2 = const_args[2];
2452
2453    switch (opc) {
2454    case INDEX_op_goto_ptr:
2455        /* jmp to the given host address (could be epilogue) */
2456        tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
2457        break;
2458    case INDEX_op_br:
2459        tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0);
2460        break;
2461    OP_32_64(ld8u):
2462        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
2463        tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
2464        break;
2465    OP_32_64(ld8s):
2466        tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2);
2467        break;
2468    OP_32_64(ld16u):
2469        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
2470        tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2);
2471        break;
2472    OP_32_64(ld16s):
2473        tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2);
2474        break;
2475#if TCG_TARGET_REG_BITS == 64
2476    case INDEX_op_ld32u_i64:
2477#endif
2478    case INDEX_op_ld_i32:
2479        tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2);
2480        break;
2481
2482    OP_32_64(st8):
2483        if (const_args[0]) {
2484            tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2);
2485            tcg_out8(s, a0);
2486        } else {
2487            tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2);
2488        }
2489        break;
2490    OP_32_64(st16):
2491        if (const_args[0]) {
2492            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2);
2493            tcg_out16(s, a0);
2494        } else {
2495            tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2);
2496        }
2497        break;
2498#if TCG_TARGET_REG_BITS == 64
2499    case INDEX_op_st32_i64:
2500#endif
2501    case INDEX_op_st_i32:
2502        if (const_args[0]) {
2503            tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2);
2504            tcg_out32(s, a0);
2505        } else {
2506            tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2);
2507        }
2508        break;
2509
2510    OP_32_64(add):
2511        /* For 3-operand addition, use LEA.  */
2512        if (a0 != a1) {
2513            TCGArg c3 = 0;
2514            if (const_a2) {
2515                c3 = a2, a2 = -1;
2516            } else if (a0 == a2) {
2517                /* Watch out for dest = src + dest, since we've removed
2518                   the matching constraint on the add.  */
2519                tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
2520                break;
2521            }
2522
2523            tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
2524            break;
2525        }
2526        c = ARITH_ADD;
2527        goto gen_arith;
2528    OP_32_64(sub):
2529        c = ARITH_SUB;
2530        goto gen_arith;
2531    OP_32_64(and):
2532        c = ARITH_AND;
2533        goto gen_arith;
2534    OP_32_64(or):
2535        c = ARITH_OR;
2536        goto gen_arith;
2537    OP_32_64(xor):
2538        c = ARITH_XOR;
2539        goto gen_arith;
2540    gen_arith:
2541        if (const_a2) {
2542            tgen_arithi(s, c + rexw, a0, a2, 0);
2543        } else {
2544            tgen_arithr(s, c + rexw, a0, a2);
2545        }
2546        break;
2547
2548    OP_32_64(andc):
2549        if (const_a2) {
2550            tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
2551            tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0);
2552        } else {
2553            tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1);
2554        }
2555        break;
2556
2557    OP_32_64(mul):
2558        if (const_a2) {
2559            int32_t val;
2560            val = a2;
2561            if (val == (int8_t)val) {
2562                tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0);
2563                tcg_out8(s, val);
2564            } else {
2565                tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0);
2566                tcg_out32(s, val);
2567            }
2568        } else {
2569            tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2);
2570        }
2571        break;
2572
2573    OP_32_64(div2):
2574        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
2575        break;
2576    OP_32_64(divu2):
2577        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
2578        break;
2579
2580    OP_32_64(shl):
2581        /* For small constant 3-operand shift, use LEA.  */
2582        if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
2583            if (a2 - 1 == 0) {
2584                /* shl $1,a1,a0 -> lea (a1,a1),a0 */
2585                tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
2586            } else {
2587                /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */
2588                tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
2589            }
2590            break;
2591        }
2592        c = SHIFT_SHL;
2593        vexop = OPC_SHLX;
2594        goto gen_shift_maybe_vex;
2595    OP_32_64(shr):
2596        c = SHIFT_SHR;
2597        vexop = OPC_SHRX;
2598        goto gen_shift_maybe_vex;
2599    OP_32_64(sar):
2600        c = SHIFT_SAR;
2601        vexop = OPC_SARX;
2602        goto gen_shift_maybe_vex;
2603    OP_32_64(rotl):
2604        c = SHIFT_ROL;
2605        goto gen_shift;
2606    OP_32_64(rotr):
2607        c = SHIFT_ROR;
2608        goto gen_shift;
2609    gen_shift_maybe_vex:
2610        if (have_bmi2) {
2611            if (!const_a2) {
2612                tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
2613                break;
2614            }
2615            tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
2616        }
2617        /* FALLTHRU */
2618    gen_shift:
2619        if (const_a2) {
2620            tcg_out_shifti(s, c + rexw, a0, a2);
2621        } else {
2622            tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0);
2623        }
2624        break;
2625
2626    OP_32_64(ctz):
2627        tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
2628        break;
2629    OP_32_64(clz):
2630        tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]);
2631        break;
2632    OP_32_64(ctpop):
2633        tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
2634        break;
2635
2636    case INDEX_op_brcond_i32:
2637        tcg_out_brcond32(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
2638        break;
2639    case INDEX_op_setcond_i32:
2640        tcg_out_setcond32(s, args[3], a0, a1, a2, const_a2);
2641        break;
2642    case INDEX_op_movcond_i32:
2643        tcg_out_movcond32(s, args[5], a0, a1, a2, const_a2, args[3]);
2644        break;
2645
2646    OP_32_64(bswap16):
2647        if (a2 & TCG_BSWAP_OS) {
2648            /* Output must be sign-extended. */
2649            if (rexw) {
2650                tcg_out_bswap64(s, a0);
2651                tcg_out_shifti(s, SHIFT_SAR + rexw, a0, 48);
2652            } else {
2653                tcg_out_bswap32(s, a0);
2654                tcg_out_shifti(s, SHIFT_SAR, a0, 16);
2655            }
2656        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
2657            /* Output must be zero-extended, but input isn't. */
2658            tcg_out_bswap32(s, a0);
2659            tcg_out_shifti(s, SHIFT_SHR, a0, 16);
2660        } else {
2661            tcg_out_rolw_8(s, a0);
2662        }
2663        break;
2664    OP_32_64(bswap32):
2665        tcg_out_bswap32(s, a0);
2666        if (rexw && (a2 & TCG_BSWAP_OS)) {
2667            tcg_out_ext32s(s, a0, a0);
2668        }
2669        break;
2670
2671    OP_32_64(neg):
2672        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0);
2673        break;
2674    OP_32_64(not):
2675        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
2676        break;
2677
2678    case INDEX_op_qemu_ld_i32:
2679        tcg_out_qemu_ld(s, args, 0);
2680        break;
2681    case INDEX_op_qemu_ld_i64:
2682        tcg_out_qemu_ld(s, args, 1);
2683        break;
2684    case INDEX_op_qemu_st_i32:
2685    case INDEX_op_qemu_st8_i32:
2686        tcg_out_qemu_st(s, args, 0);
2687        break;
2688    case INDEX_op_qemu_st_i64:
2689        tcg_out_qemu_st(s, args, 1);
2690        break;
2691
2692    OP_32_64(mulu2):
2693        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
2694        break;
2695    OP_32_64(muls2):
2696        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);
2697        break;
2698    OP_32_64(add2):
2699        if (const_args[4]) {
2700            tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1);
2701        } else {
2702            tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]);
2703        }
2704        if (const_args[5]) {
2705            tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1);
2706        } else {
2707            tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]);
2708        }
2709        break;
2710    OP_32_64(sub2):
2711        if (const_args[4]) {
2712            tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1);
2713        } else {
2714            tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]);
2715        }
2716        if (const_args[5]) {
2717            tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1);
2718        } else {
2719            tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]);
2720        }
2721        break;
2722
2723#if TCG_TARGET_REG_BITS == 32
2724    case INDEX_op_brcond2_i32:
2725        tcg_out_brcond2(s, args, const_args, 0);
2726        break;
2727    case INDEX_op_setcond2_i32:
2728        tcg_out_setcond2(s, args, const_args);
2729        break;
2730#else /* TCG_TARGET_REG_BITS == 64 */
2731    case INDEX_op_ld32s_i64:
2732        tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2);
2733        break;
2734    case INDEX_op_ld_i64:
2735        tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2);
2736        break;
2737    case INDEX_op_st_i64:
2738        if (const_args[0]) {
2739            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2);
2740            tcg_out32(s, a0);
2741        } else {
2742            tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
2743        }
2744        break;
2745
2746    case INDEX_op_brcond_i64:
2747        tcg_out_brcond64(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
2748        break;
2749    case INDEX_op_setcond_i64:
2750        tcg_out_setcond64(s, args[3], a0, a1, a2, const_a2);
2751        break;
2752    case INDEX_op_movcond_i64:
2753        tcg_out_movcond64(s, args[5], a0, a1, a2, const_a2, args[3]);
2754        break;
2755
2756    case INDEX_op_bswap64_i64:
2757        tcg_out_bswap64(s, a0);
2758        break;
2759    case INDEX_op_extrh_i64_i32:
2760        tcg_out_shifti(s, SHIFT_SHR + P_REXW, a0, 32);
2761        break;
2762#endif
2763
2764    OP_32_64(deposit):
2765        if (args[3] == 0 && args[4] == 8) {
2766            /* load bits 0..7 */
2767            tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
2768        } else if (args[3] == 8 && args[4] == 8) {
2769            /* load bits 8..15 */
2770            tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
2771        } else if (args[3] == 0 && args[4] == 16) {
2772            /* load bits 0..15 */
2773            tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
2774        } else {
2775            g_assert_not_reached();
2776        }
2777        break;
2778
2779    case INDEX_op_extract_i64:
2780        if (a2 + args[3] == 32) {
2781            /* This is a 32-bit zero-extending right shift.  */
2782            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
2783            tcg_out_shifti(s, SHIFT_SHR, a0, a2);
2784            break;
2785        }
2786        /* FALLTHRU */
2787    case INDEX_op_extract_i32:
2788        /* On the off-chance that we can use the high-byte registers, do so.
2789           Otherwise emit the same ext16 + shift pattern that we would have
2790           gotten from the normal tcg-op.c expansion.  */
2791        tcg_debug_assert(a2 == 8 && args[3] == 8);
2792        if (a1 < 4 && a0 < 8) {
2793            tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
2794        } else {
2795            tcg_out_ext16u(s, a0, a1);
2796            tcg_out_shifti(s, SHIFT_SHR, a0, 8);
2797        }
2798        break;
2799
2800    case INDEX_op_sextract_i32:
2801        /* We don't implement sextract_i64, as we cannot sign-extend to
2802           64 bits without using the REX prefix that explicitly excludes
2803           access to the high-byte registers.  */
2804        tcg_debug_assert(a2 == 8 && args[3] == 8);
2805        if (a1 < 4 && a0 < 8) {
2806            tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
2807        } else {
2808            tcg_out_ext16s(s, TCG_TYPE_I32, a0, a1);
2809            tcg_out_shifti(s, SHIFT_SAR, a0, 8);
2810        }
2811        break;
2812
2813    OP_32_64(extract2):
2814        /* Note that SHRD outputs to the r/m operand.  */
2815        tcg_out_modrm(s, OPC_SHRD_Ib + rexw, a2, a0);
2816        tcg_out8(s, args[3]);
2817        break;
2818
2819    case INDEX_op_mb:
2820        tcg_out_mb(s, a0);
2821        break;
2822    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
2823    case INDEX_op_mov_i64:
2824    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
2825    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
2826    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
2827    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
2828    case INDEX_op_ext8s_i64:
2829    case INDEX_op_ext8u_i32:
2830    case INDEX_op_ext8u_i64:
2831    case INDEX_op_ext16s_i32:
2832    case INDEX_op_ext16s_i64:
2833    case INDEX_op_ext16u_i32:
2834    case INDEX_op_ext16u_i64:
2835    case INDEX_op_ext32s_i64:
2836    case INDEX_op_ext32u_i64:
2837    case INDEX_op_ext_i32_i64:
2838    case INDEX_op_extu_i32_i64:
2839    case INDEX_op_extrl_i64_i32:
2840    default:
2841        g_assert_not_reached();
2842    }
2843
2844#undef OP_32_64
2845}
2846
2847static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2848                           unsigned vecl, unsigned vece,
2849                           const TCGArg args[TCG_MAX_OP_ARGS],
2850                           const int const_args[TCG_MAX_OP_ARGS])
2851{
2852    static int const add_insn[4] = {
2853        OPC_PADDB, OPC_PADDW, OPC_PADDD, OPC_PADDQ
2854    };
2855    static int const ssadd_insn[4] = {
2856        OPC_PADDSB, OPC_PADDSW, OPC_UD2, OPC_UD2
2857    };
2858    static int const usadd_insn[4] = {
2859        OPC_PADDUB, OPC_PADDUW, OPC_UD2, OPC_UD2
2860    };
2861    static int const sub_insn[4] = {
2862        OPC_PSUBB, OPC_PSUBW, OPC_PSUBD, OPC_PSUBQ
2863    };
2864    static int const sssub_insn[4] = {
2865        OPC_PSUBSB, OPC_PSUBSW, OPC_UD2, OPC_UD2
2866    };
2867    static int const ussub_insn[4] = {
2868        OPC_PSUBUB, OPC_PSUBUW, OPC_UD2, OPC_UD2
2869    };
2870    static int const mul_insn[4] = {
2871        OPC_UD2, OPC_PMULLW, OPC_PMULLD, OPC_VPMULLQ
2872    };
2873    static int const shift_imm_insn[4] = {
2874        OPC_UD2, OPC_PSHIFTW_Ib, OPC_PSHIFTD_Ib, OPC_PSHIFTQ_Ib
2875    };
2876    static int const cmpeq_insn[4] = {
2877        OPC_PCMPEQB, OPC_PCMPEQW, OPC_PCMPEQD, OPC_PCMPEQQ
2878    };
2879    static int const cmpgt_insn[4] = {
2880        OPC_PCMPGTB, OPC_PCMPGTW, OPC_PCMPGTD, OPC_PCMPGTQ
2881    };
2882    static int const punpckl_insn[4] = {
2883        OPC_PUNPCKLBW, OPC_PUNPCKLWD, OPC_PUNPCKLDQ, OPC_PUNPCKLQDQ
2884    };
2885    static int const punpckh_insn[4] = {
2886        OPC_PUNPCKHBW, OPC_PUNPCKHWD, OPC_PUNPCKHDQ, OPC_PUNPCKHQDQ
2887    };
2888    static int const packss_insn[4] = {
2889        OPC_PACKSSWB, OPC_PACKSSDW, OPC_UD2, OPC_UD2
2890    };
2891    static int const packus_insn[4] = {
2892        OPC_PACKUSWB, OPC_PACKUSDW, OPC_UD2, OPC_UD2
2893    };
2894    static int const smin_insn[4] = {
2895        OPC_PMINSB, OPC_PMINSW, OPC_PMINSD, OPC_VPMINSQ
2896    };
2897    static int const smax_insn[4] = {
2898        OPC_PMAXSB, OPC_PMAXSW, OPC_PMAXSD, OPC_VPMAXSQ
2899    };
2900    static int const umin_insn[4] = {
2901        OPC_PMINUB, OPC_PMINUW, OPC_PMINUD, OPC_VPMINUQ
2902    };
2903    static int const umax_insn[4] = {
2904        OPC_PMAXUB, OPC_PMAXUW, OPC_PMAXUD, OPC_VPMAXUQ
2905    };
2906    static int const rotlv_insn[4] = {
2907        OPC_UD2, OPC_UD2, OPC_VPROLVD, OPC_VPROLVQ
2908    };
2909    static int const rotrv_insn[4] = {
2910        OPC_UD2, OPC_UD2, OPC_VPRORVD, OPC_VPRORVQ
2911    };
2912    static int const shlv_insn[4] = {
2913        OPC_UD2, OPC_VPSLLVW, OPC_VPSLLVD, OPC_VPSLLVQ
2914    };
2915    static int const shrv_insn[4] = {
2916        OPC_UD2, OPC_VPSRLVW, OPC_VPSRLVD, OPC_VPSRLVQ
2917    };
2918    static int const sarv_insn[4] = {
2919        OPC_UD2, OPC_VPSRAVW, OPC_VPSRAVD, OPC_VPSRAVQ
2920    };
2921    static int const shls_insn[4] = {
2922        OPC_UD2, OPC_PSLLW, OPC_PSLLD, OPC_PSLLQ
2923    };
2924    static int const shrs_insn[4] = {
2925        OPC_UD2, OPC_PSRLW, OPC_PSRLD, OPC_PSRLQ
2926    };
2927    static int const sars_insn[4] = {
2928        OPC_UD2, OPC_PSRAW, OPC_PSRAD, OPC_VPSRAQ
2929    };
2930    static int const vpshldi_insn[4] = {
2931        OPC_UD2, OPC_VPSHLDW, OPC_VPSHLDD, OPC_VPSHLDQ
2932    };
2933    static int const vpshldv_insn[4] = {
2934        OPC_UD2, OPC_VPSHLDVW, OPC_VPSHLDVD, OPC_VPSHLDVQ
2935    };
2936    static int const vpshrdv_insn[4] = {
2937        OPC_UD2, OPC_VPSHRDVW, OPC_VPSHRDVD, OPC_VPSHRDVQ
2938    };
2939    static int const abs_insn[4] = {
2940        OPC_PABSB, OPC_PABSW, OPC_PABSD, OPC_VPABSQ
2941    };
2942
2943    TCGType type = vecl + TCG_TYPE_V64;
2944    int insn, sub;
2945    TCGArg a0, a1, a2, a3;
2946
2947    a0 = args[0];
2948    a1 = args[1];
2949    a2 = args[2];
2950
2951    switch (opc) {
2952    case INDEX_op_add_vec:
2953        insn = add_insn[vece];
2954        goto gen_simd;
2955    case INDEX_op_ssadd_vec:
2956        insn = ssadd_insn[vece];
2957        goto gen_simd;
2958    case INDEX_op_usadd_vec:
2959        insn = usadd_insn[vece];
2960        goto gen_simd;
2961    case INDEX_op_sub_vec:
2962        insn = sub_insn[vece];
2963        goto gen_simd;
2964    case INDEX_op_sssub_vec:
2965        insn = sssub_insn[vece];
2966        goto gen_simd;
2967    case INDEX_op_ussub_vec:
2968        insn = ussub_insn[vece];
2969        goto gen_simd;
2970    case INDEX_op_mul_vec:
2971        insn = mul_insn[vece];
2972        goto gen_simd;
2973    case INDEX_op_and_vec:
2974        insn = OPC_PAND;
2975        goto gen_simd;
2976    case INDEX_op_or_vec:
2977        insn = OPC_POR;
2978        goto gen_simd;
2979    case INDEX_op_xor_vec:
2980        insn = OPC_PXOR;
2981        goto gen_simd;
2982    case INDEX_op_smin_vec:
2983        insn = smin_insn[vece];
2984        goto gen_simd;
2985    case INDEX_op_umin_vec:
2986        insn = umin_insn[vece];
2987        goto gen_simd;
2988    case INDEX_op_smax_vec:
2989        insn = smax_insn[vece];
2990        goto gen_simd;
2991    case INDEX_op_umax_vec:
2992        insn = umax_insn[vece];
2993        goto gen_simd;
2994    case INDEX_op_shlv_vec:
2995        insn = shlv_insn[vece];
2996        goto gen_simd;
2997    case INDEX_op_shrv_vec:
2998        insn = shrv_insn[vece];
2999        goto gen_simd;
3000    case INDEX_op_sarv_vec:
3001        insn = sarv_insn[vece];
3002        goto gen_simd;
3003    case INDEX_op_rotlv_vec:
3004        insn = rotlv_insn[vece];
3005        goto gen_simd;
3006    case INDEX_op_rotrv_vec:
3007        insn = rotrv_insn[vece];
3008        goto gen_simd;
3009    case INDEX_op_shls_vec:
3010        insn = shls_insn[vece];
3011        goto gen_simd;
3012    case INDEX_op_shrs_vec:
3013        insn = shrs_insn[vece];
3014        goto gen_simd;
3015    case INDEX_op_sars_vec:
3016        insn = sars_insn[vece];
3017        goto gen_simd;
3018    case INDEX_op_x86_punpckl_vec:
3019        insn = punpckl_insn[vece];
3020        goto gen_simd;
3021    case INDEX_op_x86_punpckh_vec:
3022        insn = punpckh_insn[vece];
3023        goto gen_simd;
3024    case INDEX_op_x86_packss_vec:
3025        insn = packss_insn[vece];
3026        goto gen_simd;
3027    case INDEX_op_x86_packus_vec:
3028        insn = packus_insn[vece];
3029        goto gen_simd;
3030    case INDEX_op_x86_vpshldv_vec:
3031        insn = vpshldv_insn[vece];
3032        a1 = a2;
3033        a2 = args[3];
3034        goto gen_simd;
3035    case INDEX_op_x86_vpshrdv_vec:
3036        insn = vpshrdv_insn[vece];
3037        a1 = a2;
3038        a2 = args[3];
3039        goto gen_simd;
3040#if TCG_TARGET_REG_BITS == 32
3041    case INDEX_op_dup2_vec:
3042        /* First merge the two 32-bit inputs to a single 64-bit element. */
3043        tcg_out_vex_modrm(s, OPC_PUNPCKLDQ, a0, a1, a2);
3044        /* Then replicate the 64-bit elements across the rest of the vector. */
3045        if (type != TCG_TYPE_V64) {
3046            tcg_out_dup_vec(s, type, MO_64, a0, a0);
3047        }
3048        break;
3049#endif
3050    case INDEX_op_abs_vec:
3051        insn = abs_insn[vece];
3052        a2 = a1;
3053        a1 = 0;
3054        goto gen_simd;
3055    gen_simd:
3056        tcg_debug_assert(insn != OPC_UD2);
3057        if (type == TCG_TYPE_V256) {
3058            insn |= P_VEXL;
3059        }
3060        tcg_out_vex_modrm(s, insn, a0, a1, a2);
3061        break;
3062
3063    case INDEX_op_cmp_vec:
3064        sub = args[3];
3065        if (sub == TCG_COND_EQ) {
3066            insn = cmpeq_insn[vece];
3067        } else if (sub == TCG_COND_GT) {
3068            insn = cmpgt_insn[vece];
3069        } else {
3070            g_assert_not_reached();
3071        }
3072        goto gen_simd;
3073
3074    case INDEX_op_andc_vec:
3075        insn = OPC_PANDN;
3076        if (type == TCG_TYPE_V256) {
3077            insn |= P_VEXL;
3078        }
3079        tcg_out_vex_modrm(s, insn, a0, a2, a1);
3080        break;
3081
3082    case INDEX_op_shli_vec:
3083        insn = shift_imm_insn[vece];
3084        sub = 6;
3085        goto gen_shift;
3086    case INDEX_op_shri_vec:
3087        insn = shift_imm_insn[vece];
3088        sub = 2;
3089        goto gen_shift;
3090    case INDEX_op_sari_vec:
3091        if (vece == MO_64) {
3092            insn = OPC_PSHIFTD_Ib | P_VEXW | P_EVEX;
3093        } else {
3094            insn = shift_imm_insn[vece];
3095        }
3096        sub = 4;
3097        goto gen_shift;
3098    case INDEX_op_rotli_vec:
3099        insn = OPC_PSHIFTD_Ib | P_EVEX;  /* VPROL[DQ] */
3100        if (vece == MO_64) {
3101            insn |= P_VEXW;
3102        }
3103        sub = 1;
3104        goto gen_shift;
3105    gen_shift:
3106        tcg_debug_assert(vece != MO_8);
3107        if (type == TCG_TYPE_V256) {
3108            insn |= P_VEXL;
3109        }
3110        tcg_out_vex_modrm(s, insn, sub, a0, a1);
3111        tcg_out8(s, a2);
3112        break;
3113
3114    case INDEX_op_ld_vec:
3115        tcg_out_ld(s, type, a0, a1, a2);
3116        break;
3117    case INDEX_op_st_vec:
3118        tcg_out_st(s, type, a0, a1, a2);
3119        break;
3120    case INDEX_op_dupm_vec:
3121        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
3122        break;
3123
3124    case INDEX_op_x86_shufps_vec:
3125        insn = OPC_SHUFPS;
3126        sub = args[3];
3127        goto gen_simd_imm8;
3128    case INDEX_op_x86_blend_vec:
3129        if (vece == MO_16) {
3130            insn = OPC_PBLENDW;
3131        } else if (vece == MO_32) {
3132            insn = (have_avx2 ? OPC_VPBLENDD : OPC_BLENDPS);
3133        } else {
3134            g_assert_not_reached();
3135        }
3136        sub = args[3];
3137        goto gen_simd_imm8;
3138    case INDEX_op_x86_vperm2i128_vec:
3139        insn = OPC_VPERM2I128;
3140        sub = args[3];
3141        goto gen_simd_imm8;
3142    case INDEX_op_x86_vpshldi_vec:
3143        insn = vpshldi_insn[vece];
3144        sub = args[3];
3145        goto gen_simd_imm8;
3146
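    /*
     * For VPTERNLOGQ, the imm8 is the truth table of the three-operand
     * boolean function, evaluated bitwise with the first operand (a0)
     * as A = 0xf0, the second (a1) as B = 0xcc, and the third (a2) as
     * C = 0xaa.  For instance:
     *   !B        = ~0xcc                          = 0x33
     *   nor(C,B)  = ~(0xaa | 0xcc)                 = 0x11
     *   nand(C,B) = ~(0xaa & 0xcc)                 = 0x77
     *   xnor(C,B) = ~(0xaa ^ 0xcc)                 = 0x99
     *   or(B,!C)  = 0xcc | ~0xaa                   = 0xdd
     *   A ? B : C = (0xf0 & 0xcc) | (~0xf0 & 0xaa) = 0xca
     */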
3147    case INDEX_op_not_vec:
3148        insn = OPC_VPTERNLOGQ;
3149        a2 = a1;
3150        sub = 0x33; /* !B */
3151        goto gen_simd_imm8;
3152    case INDEX_op_nor_vec:
3153        insn = OPC_VPTERNLOGQ;
3154        sub = 0x11; /* norCB */
3155        goto gen_simd_imm8;
3156    case INDEX_op_nand_vec:
3157        insn = OPC_VPTERNLOGQ;
3158        sub = 0x77; /* nandCB */
3159        goto gen_simd_imm8;
3160    case INDEX_op_eqv_vec:
3161        insn = OPC_VPTERNLOGQ;
3162        sub = 0x99; /* xnorCB */
3163        goto gen_simd_imm8;
3164    case INDEX_op_orc_vec:
3165        insn = OPC_VPTERNLOGQ;
3166        sub = 0xdd; /* orB!C */
3167        goto gen_simd_imm8;
3168
3169    case INDEX_op_bitsel_vec:
3170        insn = OPC_VPTERNLOGQ;
3171        a3 = args[3];
3172        if (a0 == a1) {
3173            a1 = a2;
3174            a2 = a3;
3175            sub = 0xca; /* A?B:C */
3176        } else if (a0 == a2) {
3177            a2 = a3;
3178            sub = 0xe2; /* B?A:C */
3179        } else {
3180            tcg_out_mov(s, type, a0, a3);
3181            sub = 0xb8; /* B?C:A */
3182        }
3183        goto gen_simd_imm8;
3184
3185    gen_simd_imm8:
3186        tcg_debug_assert(insn != OPC_UD2);
3187        if (type == TCG_TYPE_V256) {
3188            insn |= P_VEXL;
3189        }
3190        tcg_out_vex_modrm(s, insn, a0, a1, a2);
3191        tcg_out8(s, sub);
3192        break;
3193
3194    case INDEX_op_x86_vpblendvb_vec:
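        /*
         * VPBLENDVB takes a fourth register operand, encoded in the
         * high nibble of the trailing /is4 immediate byte; hence
         * args[3] << 4 below.
         */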
3195        insn = OPC_VPBLENDVB;
3196        if (type == TCG_TYPE_V256) {
3197            insn |= P_VEXL;
3198        }
3199        tcg_out_vex_modrm(s, insn, a0, a1, a2);
3200        tcg_out8(s, args[3] << 4);
3201        break;
3202
3203    case INDEX_op_x86_psrldq_vec:
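        /* Group 14 with /3 selects PSRLDQ, the whole-register byte shift. */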
3204        tcg_out_vex_modrm(s, OPC_GRP14, 3, a0, a1);
3205        tcg_out8(s, a2);
3206        break;
3207
3208    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov.  */
3209    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec.  */
3210    default:
3211        g_assert_not_reached();
3212    }
3213}
3214
3215static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
3216{
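    /*
     * C_On_Im(...) names a constraint set with n outputs followed by
     * m inputs.  A digit constraint ties an input to the same register
     * as that output ("0" aliases output 0, "1" output 1); the letters
     * are defined in tcg-target-con-str.h -- e.g. 'r' any general
     * register, 'x' any vector register, 'q' a byte-addressable
     * register, and 'L' a general register not reserved by the
     * qemu_ld/st helpers.
     */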
3217    switch (op) {
3218    case INDEX_op_goto_ptr:
3219        return C_O0_I1(r);
3220
3221    case INDEX_op_ld8u_i32:
3222    case INDEX_op_ld8u_i64:
3223    case INDEX_op_ld8s_i32:
3224    case INDEX_op_ld8s_i64:
3225    case INDEX_op_ld16u_i32:
3226    case INDEX_op_ld16u_i64:
3227    case INDEX_op_ld16s_i32:
3228    case INDEX_op_ld16s_i64:
3229    case INDEX_op_ld_i32:
3230    case INDEX_op_ld32u_i64:
3231    case INDEX_op_ld32s_i64:
3232    case INDEX_op_ld_i64:
3233        return C_O1_I1(r, r);
3234
3235    case INDEX_op_st8_i32:
3236    case INDEX_op_st8_i64:
3237        return C_O0_I2(qi, r);
3238
3239    case INDEX_op_st16_i32:
3240    case INDEX_op_st16_i64:
3241    case INDEX_op_st_i32:
3242    case INDEX_op_st32_i64:
3243        return C_O0_I2(ri, r);
3244
3245    case INDEX_op_st_i64:
3246        return C_O0_I2(re, r);
3247
3248    case INDEX_op_add_i32:
3249    case INDEX_op_add_i64:
3250        return C_O1_I2(r, r, re);
3251
3252    case INDEX_op_sub_i32:
3253    case INDEX_op_sub_i64:
3254    case INDEX_op_mul_i32:
3255    case INDEX_op_mul_i64:
3256    case INDEX_op_or_i32:
3257    case INDEX_op_or_i64:
3258    case INDEX_op_xor_i32:
3259    case INDEX_op_xor_i64:
3260        return C_O1_I2(r, 0, re);
3261
3262    case INDEX_op_and_i32:
3263    case INDEX_op_and_i64:
3264        return C_O1_I2(r, 0, reZ);
3265
3266    case INDEX_op_andc_i32:
3267    case INDEX_op_andc_i64:
3268        return C_O1_I2(r, r, rI);
3269
3270    case INDEX_op_shl_i32:
3271    case INDEX_op_shl_i64:
3272    case INDEX_op_shr_i32:
3273    case INDEX_op_shr_i64:
3274    case INDEX_op_sar_i32:
3275    case INDEX_op_sar_i64:
3276        return have_bmi2 ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci);
3277
3278    case INDEX_op_rotl_i32:
3279    case INDEX_op_rotl_i64:
3280    case INDEX_op_rotr_i32:
3281    case INDEX_op_rotr_i64:
3282        return C_O1_I2(r, 0, ci);
3283
3284    case INDEX_op_brcond_i32:
3285    case INDEX_op_brcond_i64:
3286        return C_O0_I2(r, re);
3287
3288    case INDEX_op_bswap16_i32:
3289    case INDEX_op_bswap16_i64:
3290    case INDEX_op_bswap32_i32:
3291    case INDEX_op_bswap32_i64:
3292    case INDEX_op_bswap64_i64:
3293    case INDEX_op_neg_i32:
3294    case INDEX_op_neg_i64:
3295    case INDEX_op_not_i32:
3296    case INDEX_op_not_i64:
3297    case INDEX_op_extrh_i64_i32:
3298        return C_O1_I1(r, 0);
3299
3300    case INDEX_op_ext8s_i32:
3301    case INDEX_op_ext8s_i64:
3302    case INDEX_op_ext8u_i32:
3303    case INDEX_op_ext8u_i64:
3304        return C_O1_I1(r, q);
3305
3306    case INDEX_op_ext16s_i32:
3307    case INDEX_op_ext16s_i64:
3308    case INDEX_op_ext16u_i32:
3309    case INDEX_op_ext16u_i64:
3310    case INDEX_op_ext32s_i64:
3311    case INDEX_op_ext32u_i64:
3312    case INDEX_op_ext_i32_i64:
3313    case INDEX_op_extu_i32_i64:
3314    case INDEX_op_extrl_i64_i32:
3315    case INDEX_op_extract_i32:
3316    case INDEX_op_extract_i64:
3317    case INDEX_op_sextract_i32:
3318    case INDEX_op_ctpop_i32:
3319    case INDEX_op_ctpop_i64:
3320        return C_O1_I1(r, r);
3321
3322    case INDEX_op_extract2_i32:
3323    case INDEX_op_extract2_i64:
3324        return C_O1_I2(r, 0, r);
3325
3326    case INDEX_op_deposit_i32:
3327    case INDEX_op_deposit_i64:
3328        return C_O1_I2(Q, 0, Q);
3329
3330    case INDEX_op_setcond_i32:
3331    case INDEX_op_setcond_i64:
3332        return C_O1_I2(q, r, re);
3333
3334    case INDEX_op_movcond_i32:
3335    case INDEX_op_movcond_i64:
3336        return C_O1_I4(r, r, re, r, 0);
3337
3338    case INDEX_op_div2_i32:
3339    case INDEX_op_div2_i64:
3340    case INDEX_op_divu2_i32:
3341    case INDEX_op_divu2_i64:
3342        return C_O2_I3(a, d, 0, 1, r);
3343
3344    case INDEX_op_mulu2_i32:
3345    case INDEX_op_mulu2_i64:
3346    case INDEX_op_muls2_i32:
3347    case INDEX_op_muls2_i64:
3348        return C_O2_I2(a, d, a, r);
3349
3350    case INDEX_op_add2_i32:
3351    case INDEX_op_add2_i64:
3352    case INDEX_op_sub2_i32:
3353    case INDEX_op_sub2_i64:
3354        return C_O2_I4(r, r, 0, 1, re, re);
3355
3356    case INDEX_op_ctz_i32:
3357    case INDEX_op_ctz_i64:
3358        return have_bmi1 ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
3359
3360    case INDEX_op_clz_i32:
3361    case INDEX_op_clz_i64:
3362        return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
3363
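    /*
     * When the guest address is wider than the host registers
     * (TARGET_LONG_BITS > TCG_TARGET_REG_BITS), it occupies two
     * registers, hence the extra L input below.
     */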
3364    case INDEX_op_qemu_ld_i32:
3365        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
3366                ? C_O1_I1(r, L) : C_O1_I2(r, L, L));
3367
3368    case INDEX_op_qemu_st_i32:
3369        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
3370                ? C_O0_I2(L, L) : C_O0_I3(L, L, L));
3371    case INDEX_op_qemu_st8_i32:
3372        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
3373                ? C_O0_I2(s, L) : C_O0_I3(s, L, L));
3374
3375    case INDEX_op_qemu_ld_i64:
3376        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
3377                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L)
3378                : C_O2_I2(r, r, L, L));
3379
3380    case INDEX_op_qemu_st_i64:
3381        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L)
3382                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(L, L, L)
3383                : C_O0_I4(L, L, L, L));
3384
3385    case INDEX_op_brcond2_i32:
3386        return C_O0_I4(r, r, ri, ri);
3387
3388    case INDEX_op_setcond2_i32:
3389        return C_O1_I4(r, r, r, ri, ri);
3390
3391    case INDEX_op_ld_vec:
3392    case INDEX_op_dupm_vec:
3393        return C_O1_I1(x, r);
3394
3395    case INDEX_op_st_vec:
3396        return C_O0_I2(x, r);
3397
3398    case INDEX_op_add_vec:
3399    case INDEX_op_sub_vec:
3400    case INDEX_op_mul_vec:
3401    case INDEX_op_and_vec:
3402    case INDEX_op_or_vec:
3403    case INDEX_op_xor_vec:
3404    case INDEX_op_andc_vec:
3405    case INDEX_op_orc_vec:
3406    case INDEX_op_nand_vec:
3407    case INDEX_op_nor_vec:
3408    case INDEX_op_eqv_vec:
3409    case INDEX_op_ssadd_vec:
3410    case INDEX_op_usadd_vec:
3411    case INDEX_op_sssub_vec:
3412    case INDEX_op_ussub_vec:
3413    case INDEX_op_smin_vec:
3414    case INDEX_op_umin_vec:
3415    case INDEX_op_smax_vec:
3416    case INDEX_op_umax_vec:
3417    case INDEX_op_shlv_vec:
3418    case INDEX_op_shrv_vec:
3419    case INDEX_op_sarv_vec:
3420    case INDEX_op_rotlv_vec:
3421    case INDEX_op_rotrv_vec:
3422    case INDEX_op_shls_vec:
3423    case INDEX_op_shrs_vec:
3424    case INDEX_op_sars_vec:
3425    case INDEX_op_cmp_vec:
3426    case INDEX_op_x86_shufps_vec:
3427    case INDEX_op_x86_blend_vec:
3428    case INDEX_op_x86_packss_vec:
3429    case INDEX_op_x86_packus_vec:
3430    case INDEX_op_x86_vperm2i128_vec:
3431    case INDEX_op_x86_punpckl_vec:
3432    case INDEX_op_x86_punpckh_vec:
3433    case INDEX_op_x86_vpshldi_vec:
3434#if TCG_TARGET_REG_BITS == 32
3435    case INDEX_op_dup2_vec:
3436#endif
3437        return C_O1_I2(x, x, x);
3438
3439    case INDEX_op_abs_vec:
3440    case INDEX_op_dup_vec:
3441    case INDEX_op_not_vec:
3442    case INDEX_op_shli_vec:
3443    case INDEX_op_shri_vec:
3444    case INDEX_op_sari_vec:
3445    case INDEX_op_rotli_vec:
3446    case INDEX_op_x86_psrldq_vec:
3447        return C_O1_I1(x, x);
3448
3449    case INDEX_op_x86_vpshldv_vec:
3450    case INDEX_op_x86_vpshrdv_vec:
3451        return C_O1_I3(x, 0, x, x);
3452
3453    case INDEX_op_bitsel_vec:
3454    case INDEX_op_x86_vpblendvb_vec:
3455        return C_O1_I3(x, x, x, x);
3456
3457    default:
3458        g_assert_not_reached();
3459    }
3460}
3461
3462int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3463{
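    /*
     * Return 1 if the opcode is supported directly for this type and
     * element size, 0 if it is not supported at all, and -1 if it can
     * be implemented by expansion via tcg_expand_vec_op() below.
     */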
3464    switch (opc) {
3465    case INDEX_op_add_vec:
3466    case INDEX_op_sub_vec:
3467    case INDEX_op_and_vec:
3468    case INDEX_op_or_vec:
3469    case INDEX_op_xor_vec:
3470    case INDEX_op_andc_vec:
3471    case INDEX_op_orc_vec:
3472    case INDEX_op_nand_vec:
3473    case INDEX_op_nor_vec:
3474    case INDEX_op_eqv_vec:
3475    case INDEX_op_not_vec:
3476    case INDEX_op_bitsel_vec:
3477        return 1;
3478    case INDEX_op_cmp_vec:
3479    case INDEX_op_cmpsel_vec:
3480        return -1;
3481
3482    case INDEX_op_rotli_vec:
3483        return have_avx512vl && vece >= MO_32 ? 1 : -1;
3484
3485    case INDEX_op_shli_vec:
3486    case INDEX_op_shri_vec:
3487        /* We must expand the operation for MO_8.  */
3488        return vece == MO_8 ? -1 : 1;
3489
3490    case INDEX_op_sari_vec:
3491        switch (vece) {
3492        case MO_8:
3493            return -1;
3494        case MO_16:
3495        case MO_32:
3496            return 1;
3497        case MO_64:
3498            if (have_avx512vl) {
3499                return 1;
3500            }
3501            /*
3502             * We can emulate this for MO_64, but it does not pay off
3503             * unless we're producing at least 4 values.
3504             */
3505            return type >= TCG_TYPE_V256 ? -1 : 0;
3506        }
3507        return 0;
3508
3509    case INDEX_op_shls_vec:
3510    case INDEX_op_shrs_vec:
3511        return vece >= MO_16;
3512    case INDEX_op_sars_vec:
3513        switch (vece) {
3514        case MO_16:
3515        case MO_32:
3516            return 1;
3517        case MO_64:
3518            return have_avx512vl;
3519        }
3520        return 0;
3521    case INDEX_op_rotls_vec:
3522        return vece >= MO_16 ? -1 : 0;
3523
3524    case INDEX_op_shlv_vec:
3525    case INDEX_op_shrv_vec:
3526        switch (vece) {
3527        case MO_16:
3528            return have_avx512bw;
3529        case MO_32:
3530        case MO_64:
3531            return have_avx2;
3532        }
3533        return 0;
3534    case INDEX_op_sarv_vec:
3535        switch (vece) {
3536        case MO_16:
3537            return have_avx512bw;
3538        case MO_32:
3539            return have_avx2;
3540        case MO_64:
3541            return have_avx512vl;
3542        }
3543        return 0;
3544    case INDEX_op_rotlv_vec:
3545    case INDEX_op_rotrv_vec:
3546        switch (vece) {
3547        case MO_16:
3548            return have_avx512vbmi2 ? -1 : 0;
3549        case MO_32:
3550        case MO_64:
3551            return have_avx512vl ? 1 : have_avx2 ? -1 : 0;
3552        }
3553        return 0;
3554
3555    case INDEX_op_mul_vec:
3556        switch (vece) {
3557        case MO_8:
3558            return -1;
3559        case MO_64:
3560            return have_avx512dq;
3561        }
3562        return 1;
3563
3564    case INDEX_op_ssadd_vec:
3565    case INDEX_op_usadd_vec:
3566    case INDEX_op_sssub_vec:
3567    case INDEX_op_ussub_vec:
3568        return vece <= MO_16;
3569    case INDEX_op_smin_vec:
3570    case INDEX_op_smax_vec:
3571    case INDEX_op_umin_vec:
3572    case INDEX_op_umax_vec:
3573    case INDEX_op_abs_vec:
3574        return vece <= MO_32 || have_avx512vl;
3575
3576    default:
3577        return 0;
3578    }
3579}
3580
3581static void expand_vec_shi(TCGType type, unsigned vece, TCGOpcode opc,
3582                           TCGv_vec v0, TCGv_vec v1, TCGArg imm)
3583{
3584    TCGv_vec t1, t2;
3585
3586    tcg_debug_assert(vece == MO_8);
3587
3588    t1 = tcg_temp_new_vec(type);
3589    t2 = tcg_temp_new_vec(type);
3590
3591    /*
3592     * Unpack to W, shift, and repack.  Tricky bits:
3593     * (1) Use punpck*bw x,x to produce DDCCBBAA,
3594     *     i.e. duplicate in other half of the 16-bit lane.
3595     * (2) For right-shift, add 8 so that the high half of the lane
3596     *     becomes zero.  For left-shift and left-rotate, we must
3597     *     shift up and down again.
3598     * (3) Step 2 leaves high half zero such that PACKUSWB
3599     *     (pack with unsigned saturation) does not modify
3600     *     the quantity.
3601     */
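    /*
     * As a worked example for left-rotate by i: after the punpck each
     * 16-bit lane holds (A << 8) | A, so
     *   (((A << 8) | A) << i) >> 8
     *     = ((A << (8 + i)) | (A << i)) >> 8
     *     = ((A << i) mod 256) | (A >> (8 - i))
     * which is rol8(A, i).
     */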
3602    vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3603              tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
3604    vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3605              tcgv_vec_arg(t2), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
3606
3607    if (opc != INDEX_op_rotli_vec) {
3608        imm += 8;
3609    }
3610    if (opc == INDEX_op_shri_vec) {
3611        tcg_gen_shri_vec(MO_16, t1, t1, imm);
3612        tcg_gen_shri_vec(MO_16, t2, t2, imm);
3613    } else {
3614        tcg_gen_shli_vec(MO_16, t1, t1, imm);
3615        tcg_gen_shli_vec(MO_16, t2, t2, imm);
3616        tcg_gen_shri_vec(MO_16, t1, t1, 8);
3617        tcg_gen_shri_vec(MO_16, t2, t2, 8);
3618    }
3619
3620    vec_gen_3(INDEX_op_x86_packus_vec, type, MO_8,
3621              tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3622    tcg_temp_free_vec(t1);
3623    tcg_temp_free_vec(t2);
3624}
3625
3626static void expand_vec_sari(TCGType type, unsigned vece,
3627                            TCGv_vec v0, TCGv_vec v1, TCGArg imm)
3628{
3629    TCGv_vec t1, t2;
3630
3631    switch (vece) {
3632    case MO_8:
3633        /* Unpack to W, shift, and repack, as in expand_vec_shi.  */
3634        t1 = tcg_temp_new_vec(type);
3635        t2 = tcg_temp_new_vec(type);
3636        vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3637                  tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
3638        vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3639                  tcgv_vec_arg(t2), tcgv_vec_arg(v1), tcgv_vec_arg(v1));
3640        tcg_gen_sari_vec(MO_16, t1, t1, imm + 8);
3641        tcg_gen_sari_vec(MO_16, t2, t2, imm + 8);
3642        vec_gen_3(INDEX_op_x86_packss_vec, type, MO_8,
3643                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3644        tcg_temp_free_vec(t1);
3645        tcg_temp_free_vec(t2);
3646        break;
3647
3648    case MO_64:
3649        t1 = tcg_temp_new_vec(type);
3650        if (imm <= 32) {
3651            /*
3652             * We can emulate a small sign extend by performing an arithmetic
3653             * 32-bit shift and overwriting the high half of a 64-bit logical
3654             * shift.  Note that the ISA says shift of 32 is valid, but TCG
3655             * does not, so we have to bound the smaller shift -- we get the
3656             * same result in the high half either way.
3657             */
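            /*
             * The 0xaa blend mask selects the odd dwords, i.e. the
             * high half of every 64-bit element, from the arithmetic
             * shift result, while keeping the correct low half from
             * the logical shift.
             */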
3658            tcg_gen_sari_vec(MO_32, t1, v1, MIN(imm, 31));
3659            tcg_gen_shri_vec(MO_64, v0, v1, imm);
3660            vec_gen_4(INDEX_op_x86_blend_vec, type, MO_32,
3661                      tcgv_vec_arg(v0), tcgv_vec_arg(v0),
3662                      tcgv_vec_arg(t1), 0xaa);
3663        } else {
3664            /* Otherwise we will need to use a compare vs 0 to produce
3665             * the sign-extend, shift and merge.
3666             */
3667            tcg_gen_cmp_vec(TCG_COND_GT, MO_64, t1,
3668                            tcg_constant_vec(type, MO_64, 0), v1);
3669            tcg_gen_shri_vec(MO_64, v0, v1, imm);
3670            tcg_gen_shli_vec(MO_64, t1, t1, 64 - imm);
3671            tcg_gen_or_vec(MO_64, v0, v0, t1);
3672        }
3673        tcg_temp_free_vec(t1);
3674        break;
3675
3676    default:
3677        g_assert_not_reached();
3678    }
3679}
3680
3681static void expand_vec_rotli(TCGType type, unsigned vece,
3682                             TCGv_vec v0, TCGv_vec v1, TCGArg imm)
3683{
3684    TCGv_vec t;
3685
3686    if (vece == MO_8) {
3687        expand_vec_shi(type, vece, INDEX_op_rotli_vec, v0, v1, imm);
3688        return;
3689    }
3690
3691    if (have_avx512vbmi2) {
3692        vec_gen_4(INDEX_op_x86_vpshldi_vec, type, vece,
3693                  tcgv_vec_arg(v0), tcgv_vec_arg(v1), tcgv_vec_arg(v1), imm);
3694        return;
3695    }
3696
3697    t = tcg_temp_new_vec(type);
3698    tcg_gen_shli_vec(vece, t, v1, imm);
3699    tcg_gen_shri_vec(vece, v0, v1, (8 << vece) - imm);
3700    tcg_gen_or_vec(vece, v0, v0, t);
3701    tcg_temp_free_vec(t);
3702}
3703
3704static void expand_vec_rotv(TCGType type, unsigned vece, TCGv_vec v0,
3705                            TCGv_vec v1, TCGv_vec sh, bool right)
3706{
3707    TCGv_vec t;
3708
3709    if (have_avx512vbmi2) {
3710        vec_gen_4(right ? INDEX_op_x86_vpshrdv_vec : INDEX_op_x86_vpshldv_vec,
3711                  type, vece, tcgv_vec_arg(v0), tcgv_vec_arg(v1),
3712                  tcgv_vec_arg(v1), tcgv_vec_arg(sh));
3713        return;
3714    }
3715
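    /*
     * The counter-shift below may be by the full element width when
     * sh == 0; that is safe because the AVX2 variable shifts
     * (VPSLLV/VPSRLV) yield zero for any count >= the element width.
     */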
3716    t = tcg_temp_new_vec(type);
3717    tcg_gen_dupi_vec(vece, t, 8 << vece);
3718    tcg_gen_sub_vec(vece, t, t, sh);
3719    if (right) {
3720        tcg_gen_shlv_vec(vece, t, v1, t);
3721        tcg_gen_shrv_vec(vece, v0, v1, sh);
3722    } else {
3723        tcg_gen_shrv_vec(vece, t, v1, t);
3724        tcg_gen_shlv_vec(vece, v0, v1, sh);
3725    }
3726    tcg_gen_or_vec(vece, v0, v0, t);
3727    tcg_temp_free_vec(t);
3728}
3729
3730static void expand_vec_rotls(TCGType type, unsigned vece,
3731                             TCGv_vec v0, TCGv_vec v1, TCGv_i32 lsh)
3732{
3733    TCGv_vec t = tcg_temp_new_vec(type);
3734
3735    tcg_debug_assert(vece != MO_8);
3736
3737    if (vece >= MO_32 ? have_avx512vl : have_avx512vbmi2) {
3738        tcg_gen_dup_i32_vec(vece, t, lsh);
3739        if (vece >= MO_32) {
3740            tcg_gen_rotlv_vec(vece, v0, v1, t);
3741        } else {
3742            expand_vec_rotv(type, vece, v0, v1, t, false);
3743        }
3744    } else {
3745        TCGv_i32 rsh = tcg_temp_new_i32();
3746
3747        tcg_gen_neg_i32(rsh, lsh);
3748        tcg_gen_andi_i32(rsh, rsh, (8 << vece) - 1);
3749        tcg_gen_shls_vec(vece, t, v1, lsh);
3750        tcg_gen_shrs_vec(vece, v0, v1, rsh);
3751        tcg_gen_or_vec(vece, v0, v0, t);
3752
3753        tcg_temp_free_i32(rsh);
3754    }
3755
3756    tcg_temp_free_vec(t);
3757}
3758
3759static void expand_vec_mul(TCGType type, unsigned vece,
3760                           TCGv_vec v0, TCGv_vec v1, TCGv_vec v2)
3761{
3762    TCGv_vec t1, t2, t3, t4, zero;
3763
3764    tcg_debug_assert(vece == MO_8);
3765
3766    /*
3767     * Unpack v1 bytes to words, 0 | x.
3768     * Unpack v2 bytes to words, y | 0.
3769     * This leaves the 8-bit result, x * y, with 8 bits of right padding.
3770     * Shift logical right by 8 bits to clear the high 8 bits before
3771     * using an unsigned saturated pack.
3772     *
3773     * The difference between the V64, V128 and V256 cases is merely how
3774     * we distribute the expansion between temporaries.
3775     */
3776    switch (type) {
3777    case TCG_TYPE_V64:
3778        t1 = tcg_temp_new_vec(TCG_TYPE_V128);
3779        t2 = tcg_temp_new_vec(TCG_TYPE_V128);
3780        zero = tcg_constant_vec(TCG_TYPE_V128, MO_8, 0);
3781        vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3782                  tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(zero));
3783        vec_gen_3(INDEX_op_x86_punpckl_vec, TCG_TYPE_V128, MO_8,
3784                  tcgv_vec_arg(t2), tcgv_vec_arg(zero), tcgv_vec_arg(v2));
3785        tcg_gen_mul_vec(MO_16, t1, t1, t2);
3786        tcg_gen_shri_vec(MO_16, t1, t1, 8);
3787        vec_gen_3(INDEX_op_x86_packus_vec, TCG_TYPE_V128, MO_8,
3788                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(t1));
3789        tcg_temp_free_vec(t1);
3790        tcg_temp_free_vec(t2);
3791        break;
3792
3793    case TCG_TYPE_V128:
3794    case TCG_TYPE_V256:
3795        t1 = tcg_temp_new_vec(type);
3796        t2 = tcg_temp_new_vec(type);
3797        t3 = tcg_temp_new_vec(type);
3798        t4 = tcg_temp_new_vec(type);
3799        zero = tcg_constant_vec(TCG_TYPE_V128, MO_8, 0);
3800        vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3801                  tcgv_vec_arg(t1), tcgv_vec_arg(v1), tcgv_vec_arg(zero));
3802        vec_gen_3(INDEX_op_x86_punpckl_vec, type, MO_8,
3803                  tcgv_vec_arg(t2), tcgv_vec_arg(zero), tcgv_vec_arg(v2));
3804        vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3805                  tcgv_vec_arg(t3), tcgv_vec_arg(v1), tcgv_vec_arg(zero));
3806        vec_gen_3(INDEX_op_x86_punpckh_vec, type, MO_8,
3807                  tcgv_vec_arg(t4), tcgv_vec_arg(zero), tcgv_vec_arg(v2));
3808        tcg_gen_mul_vec(MO_16, t1, t1, t2);
3809        tcg_gen_mul_vec(MO_16, t3, t3, t4);
3810        tcg_gen_shri_vec(MO_16, t1, t1, 8);
3811        tcg_gen_shri_vec(MO_16, t3, t3, 8);
3812        vec_gen_3(INDEX_op_x86_packus_vec, type, MO_8,
3813                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(t3));
3814        tcg_temp_free_vec(t1);
3815        tcg_temp_free_vec(t2);
3816        tcg_temp_free_vec(t3);
3817        tcg_temp_free_vec(t4);
3818        break;
3819
3820    default:
3821        g_assert_not_reached();
3822    }
3823}
3824
3825static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
3826                                 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
3827{
3828    enum {
3829        NEED_INV  = 1,
3830        NEED_SWAP = 2,
3831        NEED_BIAS = 4,
3832        NEED_UMIN = 8,
3833        NEED_UMAX = 16,
3834    };
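    /*
     * x86 provides only PCMPEQ* and the signed PCMPGT*, so the other
     * conditions are synthesized: NEED_BIAS subtracts 1 << (width - 1)
     * from both operands so that unsigned order matches signed order,
     * e.g. for bytes LTU(x, y) == LT(x - 0x80, y - 0x80); NEED_UMIN
     * rewrites x <=u y as umin(x, y) == x, and NEED_UMAX rewrites
     * x >=u y as umax(x, y) == x.
     */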
3835    TCGv_vec t1, t2, t3;
3836    uint8_t fixup;
3837
3838    switch (cond) {
3839    case TCG_COND_EQ:
3840    case TCG_COND_GT:
3841        fixup = 0;
3842        break;
3843    case TCG_COND_NE:
3844    case TCG_COND_LE:
3845        fixup = NEED_INV;
3846        break;
3847    case TCG_COND_LT:
3848        fixup = NEED_SWAP;
3849        break;
3850    case TCG_COND_GE:
3851        fixup = NEED_SWAP | NEED_INV;
3852        break;
3853    case TCG_COND_LEU:
3854        if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) {
3855            fixup = NEED_UMIN;
3856        } else {
3857            fixup = NEED_BIAS | NEED_INV;
3858        }
3859        break;
3860    case TCG_COND_GTU:
3861        if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece)) {
3862            fixup = NEED_UMIN | NEED_INV;
3863        } else {
3864            fixup = NEED_BIAS;
3865        }
3866        break;
3867    case TCG_COND_GEU:
3868        if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) {
3869            fixup = NEED_UMAX;
3870        } else {
3871            fixup = NEED_BIAS | NEED_SWAP | NEED_INV;
3872        }
3873        break;
3874    case TCG_COND_LTU:
3875        if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece)) {
3876            fixup = NEED_UMAX | NEED_INV;
3877        } else {
3878            fixup = NEED_BIAS | NEED_SWAP;
3879        }
3880        break;
3881    default:
3882        g_assert_not_reached();
3883    }
3884
3885    if (fixup & NEED_INV) {
3886        cond = tcg_invert_cond(cond);
3887    }
3888    if (fixup & NEED_SWAP) {
3889        t1 = v1, v1 = v2, v2 = t1;
3890        cond = tcg_swap_cond(cond);
3891    }
3892
3893    t1 = t2 = NULL;
3894    if (fixup & (NEED_UMIN | NEED_UMAX)) {
3895        t1 = tcg_temp_new_vec(type);
3896        if (fixup & NEED_UMIN) {
3897            tcg_gen_umin_vec(vece, t1, v1, v2);
3898        } else {
3899            tcg_gen_umax_vec(vece, t1, v1, v2);
3900        }
3901        v2 = t1;
3902        cond = TCG_COND_EQ;
3903    } else if (fixup & NEED_BIAS) {
3904        t1 = tcg_temp_new_vec(type);
3905        t2 = tcg_temp_new_vec(type);
3906        t3 = tcg_constant_vec(type, vece, 1ull << ((8 << vece) - 1));
3907        tcg_gen_sub_vec(vece, t1, v1, t3);
3908        tcg_gen_sub_vec(vece, t2, v2, t3);
3909        v1 = t1;
3910        v2 = t2;
3911        cond = tcg_signed_cond(cond);
3912    }
3913
3914    tcg_debug_assert(cond == TCG_COND_EQ || cond == TCG_COND_GT);
3915    /* Expand directly; do not recurse.  */
3916    vec_gen_4(INDEX_op_cmp_vec, type, vece,
3917              tcgv_vec_arg(v0), tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
3918
3919    if (t1) {
3920        tcg_temp_free_vec(t1);
3921        if (t2) {
3922            tcg_temp_free_vec(t2);
3923        }
3924    }
3925    return fixup & NEED_INV;
3926}
3927
3928static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
3929                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
3930{
3931    if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
3932        tcg_gen_not_vec(vece, v0, v0);
3933    }
3934}
3935
3936static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0,
3937                              TCGv_vec c1, TCGv_vec c2,
3938                              TCGv_vec v3, TCGv_vec v4, TCGCond cond)
3939{
3940    TCGv_vec t = tcg_temp_new_vec(type);
3941
3942    if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) {
3943        /* Invert the sense of the compare by swapping arguments.  */
3944        TCGv_vec x;
3945        x = v3, v3 = v4, v4 = x;
3946    }
3947    vec_gen_4(INDEX_op_x86_vpblendvb_vec, type, vece,
3948              tcgv_vec_arg(v0), tcgv_vec_arg(v4),
3949              tcgv_vec_arg(v3), tcgv_vec_arg(t));
3950    tcg_temp_free_vec(t);
3951}
3952
3953void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3954                       TCGArg a0, ...)
3955{
3956    va_list va;
3957    TCGArg a2;
3958    TCGv_vec v0, v1, v2, v3, v4;
3959
3960    va_start(va, a0);
3961    v0 = temp_tcgv_vec(arg_temp(a0));
3962    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3963    a2 = va_arg(va, TCGArg);
3964
3965    switch (opc) {
3966    case INDEX_op_shli_vec:
3967    case INDEX_op_shri_vec:
3968        expand_vec_shi(type, vece, opc, v0, v1, a2);
3969        break;
3970
3971    case INDEX_op_sari_vec:
3972        expand_vec_sari(type, vece, v0, v1, a2);
3973        break;
3974
3975    case INDEX_op_rotli_vec:
3976        expand_vec_rotli(type, vece, v0, v1, a2);
3977        break;
3978
3979    case INDEX_op_rotls_vec:
3980        expand_vec_rotls(type, vece, v0, v1, temp_tcgv_i32(arg_temp(a2)));
3981        break;
3982
3983    case INDEX_op_rotlv_vec:
3984        v2 = temp_tcgv_vec(arg_temp(a2));
3985        expand_vec_rotv(type, vece, v0, v1, v2, false);
3986        break;
3987    case INDEX_op_rotrv_vec:
3988        v2 = temp_tcgv_vec(arg_temp(a2));
3989        expand_vec_rotv(type, vece, v0, v1, v2, true);
3990        break;
3991
3992    case INDEX_op_mul_vec:
3993        v2 = temp_tcgv_vec(arg_temp(a2));
3994        expand_vec_mul(type, vece, v0, v1, v2);
3995        break;
3996
3997    case INDEX_op_cmp_vec:
3998        v2 = temp_tcgv_vec(arg_temp(a2));
3999        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
4000        break;
4001
4002    case INDEX_op_cmpsel_vec:
4003        v2 = temp_tcgv_vec(arg_temp(a2));
4004        v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
4005        v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
4006        expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg));
4007        break;
4008
4009    default:
4010        break;
4011    }
4012
4013    va_end(va);
4014}
4015
4016static const int tcg_target_callee_save_regs[] = {
4017#if TCG_TARGET_REG_BITS == 64
4018    TCG_REG_RBP,
4019    TCG_REG_RBX,
4020#if defined(_WIN64)
4021    TCG_REG_RDI,
4022    TCG_REG_RSI,
4023#endif
4024    TCG_REG_R12,
4025    TCG_REG_R13,
4026    TCG_REG_R14, /* Currently used for the global env. */
4027    TCG_REG_R15,
4028#else
4029    TCG_REG_EBP, /* Currently used for the global env. */
4030    TCG_REG_EBX,
4031    TCG_REG_ESI,
4032    TCG_REG_EDI,
4033#endif
4034};
4035
4036/* Compute frame size via macros, to share between tcg_target_qemu_prologue
4037   and tcg_register_jit.  */
4038
4039#define PUSH_SIZE \
4040    ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
4041     * (TCG_TARGET_REG_BITS / 8))
4042
4043#define FRAME_SIZE \
4044    ((PUSH_SIZE \
4045      + TCG_STATIC_CALL_ARGS_SIZE \
4046      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
4047      + TCG_TARGET_STACK_ALIGN - 1) \
4048     & ~(TCG_TARGET_STACK_ALIGN - 1))
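/*
 * For instance, on a typical 64-bit non-Windows host -- assuming the
 * usual TCG_STATIC_CALL_ARGS_SIZE of 128 and CPU_TEMP_BUF_NLONGS of
 * 128 -- PUSH_SIZE is (1 + 6) * 8 = 56, and FRAME_SIZE rounds
 * 56 + 128 + 1024 up to 16-byte alignment, giving 1216 bytes.
 */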
4049
4050/* Generate global QEMU prologue and epilogue code */
4051static void tcg_target_qemu_prologue(TCGContext *s)
4052{
4053    int i, stack_addend;
4054
4055    /* TB prologue */
4056
4057    /* Reserve some stack space, also for TCG temps.  */
4058    stack_addend = FRAME_SIZE - PUSH_SIZE;
4059    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
4060                  CPU_TEMP_BUF_NLONGS * sizeof(long));
4061
4062    /* Save all callee saved registers.  */
4063    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
4064        tcg_out_push(s, tcg_target_callee_save_regs[i]);
4065    }
4066
4067#if TCG_TARGET_REG_BITS == 32
4068    tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
4069               (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
4070    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
4071    /* jmp *tb.  */
4072    tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
4073                         (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
4074                         + stack_addend);
4075#else
4076# if !defined(CONFIG_SOFTMMU) && TCG_TARGET_REG_BITS == 64
4077    if (guest_base) {
4078        int seg = setup_guest_base_seg();
4079        if (seg != 0) {
4080            x86_guest_base_seg = seg;
4081        } else if (guest_base == (int32_t)guest_base) {
4082            x86_guest_base_offset = guest_base;
4083        } else {
4084            /* Choose R12 because, as a base, it requires a SIB byte. */
4085            x86_guest_base_index = TCG_REG_R12;
4086            tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base_index, guest_base);
4087            tcg_regset_set_reg(s->reserved_regs, x86_guest_base_index);
4088        }
4089    }
4090# endif
4091    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
4092    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
4093    /* jmp *tb.  */
4094    tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);
4095#endif
4096
4097    /*
4098     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
4099     * and fall through to the rest of the epilogue.
4100     */
4101    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
4102    tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0);
4103
4104    /* TB epilogue */
4105    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
4106
4107    tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);
4108
4109    if (have_avx2) {
4110        tcg_out_vex_opc(s, OPC_VZEROUPPER, 0, 0, 0, 0);
4111    }
4112    for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
4113        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
4114    }
4115    tcg_out_opc(s, OPC_RET, 0, 0, 0);
4116}
4117
4118static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
4119{
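    /* 0x90 is the one-byte x86 NOP instruction. */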
4120    memset(p, 0x90, count);
4121}
4122
4123static void tcg_target_init(TCGContext *s)
4124{
4125#ifdef CONFIG_CPUID_H
4126    unsigned a, b, c, d, b7 = 0, c7 = 0;
4127    unsigned max = __get_cpuid_max(0, 0);
4128
4129    if (max >= 7) {
4130        /* BMI1 is available on AMD Piledriver and Intel Haswell CPUs.  */
4131        __cpuid_count(7, 0, a, b7, c7, d);
4132        have_bmi1 = (b7 & bit_BMI) != 0;
4133        have_bmi2 = (b7 & bit_BMI2) != 0;
4134    }
4135
4136    if (max >= 1) {
4137        __cpuid(1, a, b, c, d);
4138#ifndef have_cmov
4139        /* For 32-bit, 99% certainty that we're running on hardware that
4140           supports cmov, but we still need to check.  In case cmov is not
4141           available, we'll use a small forward branch.  */
4142        have_cmov = (d & bit_CMOV) != 0;
4143#endif
4144
4145        /* MOVBE is not universally available (it first appeared on
4146           Intel Atom and Haswell CPUs), so we need to probe for it.  */
4147        have_movbe = (c & bit_MOVBE) != 0;
4148        have_popcnt = (c & bit_POPCNT) != 0;
4149
4150        /* There are a number of things we must check before we can be
4151           sure of not hitting invalid opcode.  */
4152        if (c & bit_OSXSAVE) {
4153            unsigned bv = xgetbv_low(0);
4154
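            /*
             * XCR0 bit 1 is SSE (XMM) state and bit 2 is AVX (YMM)
             * state, hence the (bv & 6) == 6 check; bits 5-7 are the
             * opmask, ZMM_Hi256 and Hi16_ZMM state tested as 0xe0
             * below.
             */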
4155            if ((bv & 6) == 6) {
4156                have_avx1 = (c & bit_AVX) != 0;
4157                have_avx2 = (b7 & bit_AVX2) != 0;
4158
4159                /*
4160                 * There are interesting instructions in AVX512, so long
4161                 * as we have AVX512VL, which indicates support for EVEX
4162                 * on sizes smaller than 512 bits.  We are required to
4163                 * check that OPMASK and all extended ZMM state are enabled
4164                 * even if we're not using them -- the insns will fault.
4165                 */
4166                if ((bv & 0xe0) == 0xe0
4167                    && (b7 & bit_AVX512F)
4168                    && (b7 & bit_AVX512VL)) {
4169                    have_avx512vl = true;
4170                    have_avx512bw = (b7 & bit_AVX512BW) != 0;
4171                    have_avx512dq = (b7 & bit_AVX512DQ) != 0;
4172                    have_avx512vbmi2 = (c7 & bit_AVX512VBMI2) != 0;
4173                }
4174            }
4175        }
4176    }
4177
4178    max = __get_cpuid_max(0x80000000, 0);
4179    if (max >= 0x80000001) {
4180        __cpuid(0x80000001, a, b, c, d);
4181        /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs.  */
4182        have_lzcnt = (c & bit_LZCNT) != 0;
4183    }
4184#endif /* CONFIG_CPUID_H */
4185
4186    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
4187    if (TCG_TARGET_REG_BITS == 64) {
4188        tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
4189    }
4190    if (have_avx1) {
4191        tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
4192        tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
4193    }
4194    if (have_avx2) {
4195        tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
4196    }
4197
4198    tcg_target_call_clobber_regs = ALL_VECTOR_REGS;
4199    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
4200    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
4201    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
4202    if (TCG_TARGET_REG_BITS == 64) {
4203#if !defined(_WIN64)
4204        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
4205        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
4206#endif
4207        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
4208        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
4209        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
4210        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
4211    }
4212
4213    s->reserved_regs = 0;
4214    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
4215#ifdef _WIN64
4216    /* These are call saved, and we don't save them, so don't use them. */
4217    tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM6);
4218    tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM7);
4219    tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM8);
4220    tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM9);
4221    tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM10);
4222    tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM11);
4223    tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM12);
4224    tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM13);
4225    tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM14);
4226    tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM15);
4227#endif
4228}
4229
4230typedef struct {
4231    DebugFrameHeader h;
4232    uint8_t fde_def_cfa[4];
4233    uint8_t fde_reg_ofs[14];
4234} DebugFrame;
4235
4236/* We're expecting a 2 byte uleb128 encoded value.  */
4237QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
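/*
 * The two uleb128 bytes are (FRAME_SIZE & 0x7f) | 0x80 followed by
 * FRAME_SIZE >> 7; e.g. a frame size of 1216 (0x4c0) would encode as
 * 0xc0, 0x09.
 */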
4238
4239#if !defined(__ELF__)
4240    /* Host machine without ELF. */
4241#elif TCG_TARGET_REG_BITS == 64
4242#define ELF_HOST_MACHINE EM_X86_64
4243static const DebugFrame debug_frame = {
4244    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
4245    .h.cie.id = -1,
4246    .h.cie.version = 1,
4247    .h.cie.code_align = 1,
4248    .h.cie.data_align = 0x78,             /* sleb128 -8 */
4249    .h.cie.return_column = 16,
4250
4251    /* Total FDE size does not include the "len" member.  */
4252    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
4253
4254    .fde_def_cfa = {
4255        12, 7,                          /* DW_CFA_def_cfa %rsp, ... */
4256        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
4257        (FRAME_SIZE >> 7)
4258    },
4259    .fde_reg_ofs = {
4260        0x90, 1,                        /* DW_CFA_offset, %rip, -8 */
4261        /* The following ordering must match tcg_target_callee_save_regs.  */
4262        0x86, 2,                        /* DW_CFA_offset, %rbp, -16 */
4263        0x83, 3,                        /* DW_CFA_offset, %rbx, -24 */
4264        0x8c, 4,                        /* DW_CFA_offset, %r12, -32 */
4265        0x8d, 5,                        /* DW_CFA_offset, %r13, -40 */
4266        0x8e, 6,                        /* DW_CFA_offset, %r14, -48 */
4267        0x8f, 7,                        /* DW_CFA_offset, %r15, -56 */
4268    }
4269};
4270#else
4271#define ELF_HOST_MACHINE EM_386
4272static const DebugFrame debug_frame = {
4273    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
4274    .h.cie.id = -1,
4275    .h.cie.version = 1,
4276    .h.cie.code_align = 1,
4277    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
4278    .h.cie.return_column = 8,
4279
4280    /* Total FDE size does not include the "len" member.  */
4281    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
4282
4283    .fde_def_cfa = {
4284        12, 4,                          /* DW_CFA_def_cfa %esp, ... */
4285        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
4286        (FRAME_SIZE >> 7)
4287    },
4288    .fde_reg_ofs = {
4289        0x88, 1,                        /* DW_CFA_offset, %eip, -4 */
4290        /* The following ordering must match tcg_target_callee_save_regs.  */
4291        0x85, 2,                        /* DW_CFA_offset, %ebp, -8 */
4292        0x83, 3,                        /* DW_CFA_offset, %ebx, -12 */
4293        0x86, 4,                        /* DW_CFA_offset, %esi, -16 */
4294        0x87, 5,                        /* DW_CFA_offset, %edi, -20 */
4295    }
4296};
4297#endif
4298
4299#if defined(ELF_HOST_MACHINE)
4300void tcg_register_jit(const void *buf, size_t buf_size)
4301{
4302    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
4303}
4304#endif
4305