xref: /qemu/include/tcg/tcg.h (revision 785ea711)
1 /*
2  * Tiny Code Generator for QEMU
3  *
4  * Copyright (c) 2008 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #ifndef TCG_H
26 #define TCG_H
27 
28 #include "cpu.h"
29 #include "exec/memop.h"
30 #include "qemu/bitops.h"
31 #include "qemu/plugin.h"
32 #include "qemu/queue.h"
33 #include "tcg/tcg-mo.h"
34 #include "tcg-target.h"
35 #include "qemu/int128.h"
36 #include "tcg/tcg-cond.h"
37 
38 /* XXX: make safe guess about sizes */
39 #define MAX_OP_PER_INSTR 266
40 
41 #if HOST_LONG_BITS == 32
42 #define MAX_OPC_PARAM_PER_ARG 2
43 #else
44 #define MAX_OPC_PARAM_PER_ARG 1
45 #endif
46 #define MAX_OPC_PARAM_IARGS 6
47 #define MAX_OPC_PARAM_OARGS 1
48 #define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
49 
50 /* A Call op needs up to 4 + 2N parameters on 32-bit archs,
51  * and up to 4 + N parameters on 64-bit archs
52  * (N = number of input arguments + output arguments).  */
53 #define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
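/* For example, with MAX_OPC_PARAM_IARGS = 6 and MAX_OPC_PARAM_OARGS = 1,
   MAX_OPC_PARAM_ARGS is 7, so MAX_OPC_PARAM evaluates to 4 + 2 * 7 = 18
   on 32-bit hosts and to 4 + 1 * 7 = 11 on 64-bit hosts.  */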
54 
55 #define CPU_TEMP_BUF_NLONGS 128
56 #define TCG_STATIC_FRAME_SIZE  (CPU_TEMP_BUF_NLONGS * sizeof(long))
57 
58 /* Default target word size to pointer size.  */
59 #ifndef TCG_TARGET_REG_BITS
60 # if UINTPTR_MAX == UINT32_MAX
61 #  define TCG_TARGET_REG_BITS 32
62 # elif UINTPTR_MAX == UINT64_MAX
63 #  define TCG_TARGET_REG_BITS 64
64 # else
65 #  error Unknown pointer size for tcg target
66 # endif
67 #endif
68 
69 #if TCG_TARGET_REG_BITS == 32
70 typedef int32_t tcg_target_long;
71 typedef uint32_t tcg_target_ulong;
72 #define TCG_PRIlx PRIx32
73 #define TCG_PRIld PRId32
74 #elif TCG_TARGET_REG_BITS == 64
75 typedef int64_t tcg_target_long;
76 typedef uint64_t tcg_target_ulong;
77 #define TCG_PRIlx PRIx64
78 #define TCG_PRIld PRId64
79 #else
80 #error unsupported
81 #endif
82 
83 /* Oversized TCG guests make things like MTTCG hard
84  * as we can't use atomics for cputlb updates.
85  */
86 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
87 #define TCG_OVERSIZED_GUEST 1
88 #else
89 #define TCG_OVERSIZED_GUEST 0
90 #endif
91 
92 #if TCG_TARGET_NB_REGS <= 32
93 typedef uint32_t TCGRegSet;
94 #elif TCG_TARGET_NB_REGS <= 64
95 typedef uint64_t TCGRegSet;
96 #else
97 #error unsupported
98 #endif
99 
100 #if TCG_TARGET_REG_BITS == 32
101 /* Turn some undef macros into false macros.  */
102 #define TCG_TARGET_HAS_extrl_i64_i32    0
103 #define TCG_TARGET_HAS_extrh_i64_i32    0
104 #define TCG_TARGET_HAS_div_i64          0
105 #define TCG_TARGET_HAS_rem_i64          0
106 #define TCG_TARGET_HAS_div2_i64         0
107 #define TCG_TARGET_HAS_rot_i64          0
108 #define TCG_TARGET_HAS_ext8s_i64        0
109 #define TCG_TARGET_HAS_ext16s_i64       0
110 #define TCG_TARGET_HAS_ext32s_i64       0
111 #define TCG_TARGET_HAS_ext8u_i64        0
112 #define TCG_TARGET_HAS_ext16u_i64       0
113 #define TCG_TARGET_HAS_ext32u_i64       0
114 #define TCG_TARGET_HAS_bswap16_i64      0
115 #define TCG_TARGET_HAS_bswap32_i64      0
116 #define TCG_TARGET_HAS_bswap64_i64      0
117 #define TCG_TARGET_HAS_neg_i64          0
118 #define TCG_TARGET_HAS_not_i64          0
119 #define TCG_TARGET_HAS_andc_i64         0
120 #define TCG_TARGET_HAS_orc_i64          0
121 #define TCG_TARGET_HAS_eqv_i64          0
122 #define TCG_TARGET_HAS_nand_i64         0
123 #define TCG_TARGET_HAS_nor_i64          0
124 #define TCG_TARGET_HAS_clz_i64          0
125 #define TCG_TARGET_HAS_ctz_i64          0
126 #define TCG_TARGET_HAS_ctpop_i64        0
127 #define TCG_TARGET_HAS_deposit_i64      0
128 #define TCG_TARGET_HAS_extract_i64      0
129 #define TCG_TARGET_HAS_sextract_i64     0
130 #define TCG_TARGET_HAS_extract2_i64     0
131 #define TCG_TARGET_HAS_movcond_i64      0
132 #define TCG_TARGET_HAS_add2_i64         0
133 #define TCG_TARGET_HAS_sub2_i64         0
134 #define TCG_TARGET_HAS_mulu2_i64        0
135 #define TCG_TARGET_HAS_muls2_i64        0
136 #define TCG_TARGET_HAS_muluh_i64        0
137 #define TCG_TARGET_HAS_mulsh_i64        0
138 /* Turn some undef macros into true macros.  */
139 #define TCG_TARGET_HAS_add2_i32         1
140 #define TCG_TARGET_HAS_sub2_i32         1
141 #endif
142 
143 #ifndef TCG_TARGET_deposit_i32_valid
144 #define TCG_TARGET_deposit_i32_valid(ofs, len) 1
145 #endif
146 #ifndef TCG_TARGET_deposit_i64_valid
147 #define TCG_TARGET_deposit_i64_valid(ofs, len) 1
148 #endif
149 #ifndef TCG_TARGET_extract_i32_valid
150 #define TCG_TARGET_extract_i32_valid(ofs, len) 1
151 #endif
152 #ifndef TCG_TARGET_extract_i64_valid
153 #define TCG_TARGET_extract_i64_valid(ofs, len) 1
154 #endif
155 
156 /* Only one of DIV or DIV2 should be defined.  */
157 #if defined(TCG_TARGET_HAS_div_i32)
158 #define TCG_TARGET_HAS_div2_i32         0
159 #elif defined(TCG_TARGET_HAS_div2_i32)
160 #define TCG_TARGET_HAS_div_i32          0
161 #define TCG_TARGET_HAS_rem_i32          0
162 #endif
163 #if defined(TCG_TARGET_HAS_div_i64)
164 #define TCG_TARGET_HAS_div2_i64         0
165 #elif defined(TCG_TARGET_HAS_div2_i64)
166 #define TCG_TARGET_HAS_div_i64          0
167 #define TCG_TARGET_HAS_rem_i64          0
168 #endif
169 
170 /* For 32-bit targets, some sort of unsigned widening multiply is required.  */
171 #if TCG_TARGET_REG_BITS == 32 \
172     && !(defined(TCG_TARGET_HAS_mulu2_i32) \
173          || defined(TCG_TARGET_HAS_muluh_i32))
174 # error "Missing unsigned widening multiply"
175 #endif
176 
177 #if !defined(TCG_TARGET_HAS_v64) \
178     && !defined(TCG_TARGET_HAS_v128) \
179     && !defined(TCG_TARGET_HAS_v256)
180 #define TCG_TARGET_MAYBE_vec            0
181 #define TCG_TARGET_HAS_abs_vec          0
182 #define TCG_TARGET_HAS_neg_vec          0
183 #define TCG_TARGET_HAS_not_vec          0
184 #define TCG_TARGET_HAS_andc_vec         0
185 #define TCG_TARGET_HAS_orc_vec          0
186 #define TCG_TARGET_HAS_roti_vec         0
187 #define TCG_TARGET_HAS_rots_vec         0
188 #define TCG_TARGET_HAS_rotv_vec         0
189 #define TCG_TARGET_HAS_shi_vec          0
190 #define TCG_TARGET_HAS_shs_vec          0
191 #define TCG_TARGET_HAS_shv_vec          0
192 #define TCG_TARGET_HAS_mul_vec          0
193 #define TCG_TARGET_HAS_sat_vec          0
194 #define TCG_TARGET_HAS_minmax_vec       0
195 #define TCG_TARGET_HAS_bitsel_vec       0
196 #define TCG_TARGET_HAS_cmpsel_vec       0
197 #else
198 #define TCG_TARGET_MAYBE_vec            1
199 #endif
200 #ifndef TCG_TARGET_HAS_v64
201 #define TCG_TARGET_HAS_v64              0
202 #endif
203 #ifndef TCG_TARGET_HAS_v128
204 #define TCG_TARGET_HAS_v128             0
205 #endif
206 #ifndef TCG_TARGET_HAS_v256
207 #define TCG_TARGET_HAS_v256             0
208 #endif
209 
210 #ifndef TARGET_INSN_START_EXTRA_WORDS
211 # define TARGET_INSN_START_WORDS 1
212 #else
213 # define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
214 #endif
215 
216 typedef enum TCGOpcode {
217 #define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
218 #include "tcg/tcg-opc.h"
219 #undef DEF
220     NB_OPS,
221 } TCGOpcode;
222 
223 #define tcg_regset_set_reg(d, r)   ((d) |= (TCGRegSet)1 << (r))
224 #define tcg_regset_reset_reg(d, r) ((d) &= ~((TCGRegSet)1 << (r)))
225 #define tcg_regset_test_reg(d, r)  (((d) >> (r)) & 1)
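/* As an illustration, a backend's tcg_target_init() typically reserves the
 * host stack pointer for the register allocator with something like:
 *
 *     tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
 *
 * where TCG_REG_CALL_STACK is provided by the backend's tcg-target.h.
 */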
226 
227 #ifndef TCG_TARGET_INSN_UNIT_SIZE
228 # error "Missing TCG_TARGET_INSN_UNIT_SIZE"
229 #elif TCG_TARGET_INSN_UNIT_SIZE == 1
230 typedef uint8_t tcg_insn_unit;
231 #elif TCG_TARGET_INSN_UNIT_SIZE == 2
232 typedef uint16_t tcg_insn_unit;
233 #elif TCG_TARGET_INSN_UNIT_SIZE == 4
234 typedef uint32_t tcg_insn_unit;
235 #elif TCG_TARGET_INSN_UNIT_SIZE == 8
236 typedef uint64_t tcg_insn_unit;
237 #else
238 /* The port is expected to have defined tcg_insn_unit itself.  */
239 #endif
240 
241 
242 #if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
243 # define tcg_debug_assert(X) do { assert(X); } while (0)
244 #else
245 # define tcg_debug_assert(X) \
246     do { if (!(X)) { __builtin_unreachable(); } } while (0)
247 #endif
248 
249 typedef struct TCGRelocation TCGRelocation;
250 struct TCGRelocation {
251     QSIMPLEQ_ENTRY(TCGRelocation) next;
252     tcg_insn_unit *ptr;
253     intptr_t addend;
254     int type;
255 };
256 
257 typedef struct TCGLabel TCGLabel;
258 struct TCGLabel {
259     unsigned present : 1;
260     unsigned has_value : 1;
261     unsigned id : 14;
262     unsigned refs : 16;
263     union {
264         uintptr_t value;
265         const tcg_insn_unit *value_ptr;
266     } u;
267     QSIMPLEQ_HEAD(, TCGRelocation) relocs;
268     QSIMPLEQ_ENTRY(TCGLabel) next;
269 };
270 
271 typedef struct TCGPool {
272     struct TCGPool *next;
273     int size;
274     uint8_t data[] __attribute__ ((aligned));
275 } TCGPool;
276 
277 #define TCG_POOL_CHUNK_SIZE 32768
278 
279 #define TCG_MAX_TEMPS 512
280 #define TCG_MAX_INSNS 512
281 
282 /* when the size of the arguments of a called function is smaller than
283    this value, they are statically allocated in the TB stack frame */
284 #define TCG_STATIC_CALL_ARGS_SIZE 128
285 
286 typedef enum TCGType {
287     TCG_TYPE_I32,
288     TCG_TYPE_I64,
289 
290     TCG_TYPE_V64,
291     TCG_TYPE_V128,
292     TCG_TYPE_V256,
293 
294     TCG_TYPE_COUNT, /* number of different types */
295 
296     /* An alias for the size of the host register.  */
297 #if TCG_TARGET_REG_BITS == 32
298     TCG_TYPE_REG = TCG_TYPE_I32,
299 #else
300     TCG_TYPE_REG = TCG_TYPE_I64,
301 #endif
302 
303     /* An alias for the size of the native pointer.  */
304 #if UINTPTR_MAX == UINT32_MAX
305     TCG_TYPE_PTR = TCG_TYPE_I32,
306 #else
307     TCG_TYPE_PTR = TCG_TYPE_I64,
308 #endif
309 
310     /* An alias for the size of the target "long", aka register.  */
311 #if TARGET_LONG_BITS == 64
312     TCG_TYPE_TL = TCG_TYPE_I64,
313 #else
314     TCG_TYPE_TL = TCG_TYPE_I32,
315 #endif
316 } TCGType;
317 
318 /**
319  * get_alignment_bits
320  * @memop: MemOp value
321  *
322  * Extract the alignment size from the memop.
323  */
324 static inline unsigned get_alignment_bits(MemOp memop)
325 {
326     unsigned a = memop & MO_AMASK;
327 
328     if (a == MO_UNALN) {
329         /* No alignment required.  */
330         a = 0;
331     } else if (a == MO_ALIGN) {
332         /* A natural alignment requirement.  */
333         a = memop & MO_SIZE;
334     } else {
335         /* A specific alignment requirement.  */
336         a = a >> MO_ASHIFT;
337     }
338 #if defined(CONFIG_SOFTMMU)
339     /* The requested alignment cannot overlap the TLB flags.  */
340     tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
341 #endif
342     return a;
343 }
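/*
 * For example, given the MemOp encodings from "exec/memop.h",
 * get_alignment_bits(MO_32 | MO_ALIGN) returns 2 (natural 4-byte
 * alignment), while get_alignment_bits(MO_64 | MO_UNALN) returns 0
 * (no alignment required).
 */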
344 
345 typedef tcg_target_ulong TCGArg;
346 
347 /* Define type and accessor macros for TCG variables.
348 
349    TCG variables are the inputs and outputs of TCG ops, as described
350    in tcg/README. Target CPU front-end code uses these types to deal
351    with TCG variables as it emits TCG code via the tcg_gen_* functions.
352    They come in several flavours:
353     * TCGv_i32 : 32 bit integer type
354     * TCGv_i64 : 64 bit integer type
355     * TCGv_ptr : a host pointer type
356     * TCGv_vec : a host vector type; the exact size is not exposed
357                  to the CPU front-end code.
358     * TCGv : an integer type the same size as target_ulong
359              (an alias for either TCGv_i32 or TCGv_i64)
360    The compiler's type checking will complain if you mix them
361    up and pass the wrong sized TCGv to a function.
362 
363    Users of tcg_gen_* don't need to know about any of the internal
364    details of these, and should treat them as opaque types.
365    You won't be able to look inside them in a debugger either.
366 
367    Internal implementation details follow:
368 
369    Note that there is no definition of the structs TCGv_i32_d etc anywhere.
370    This is deliberate, because the values we store in variables of type
371    TCGv_i32 are not really pointers-to-structures. They're just small
372    integers, but keeping them in pointer types like this means that the
373    compiler will complain if you accidentally pass a TCGv_i32 to a
374    function which takes a TCGv_i64, and so on. Only the internals of
375    TCG need to care about the actual contents of the types.  */
376 
377 typedef struct TCGv_i32_d *TCGv_i32;
378 typedef struct TCGv_i64_d *TCGv_i64;
379 typedef struct TCGv_ptr_d *TCGv_ptr;
380 typedef struct TCGv_vec_d *TCGv_vec;
381 typedef TCGv_ptr TCGv_env;
382 #if TARGET_LONG_BITS == 32
383 #define TCGv TCGv_i32
384 #elif TARGET_LONG_BITS == 64
385 #define TCGv TCGv_i64
386 #else
387 #error Unhandled TARGET_LONG_BITS value
388 #endif
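/*
 * An illustrative front-end sequence (a sketch only: the tcg_gen_*
 * emitters are declared in "tcg/tcg-op.h", and "cpu_some_reg" stands in
 * for a target-specific TCGv_i32 global):
 *
 *     TCGv_i32 tmp = tcg_temp_new_i32();
 *     tcg_gen_addi_i32(tmp, cpu_some_reg, 4);
 *     tcg_gen_mov_i32(cpu_some_reg, tmp);
 *     tcg_temp_free_i32(tmp);
 */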
389 
390 /* call flags */
391 /* Helper does not read globals (either directly or through an exception). It
392    implies TCG_CALL_NO_WRITE_GLOBALS. */
393 #define TCG_CALL_NO_READ_GLOBALS    0x0001
394 /* Helper does not write globals */
395 #define TCG_CALL_NO_WRITE_GLOBALS   0x0002
396 /* Helper can be safely suppressed if the return value is not used. */
397 #define TCG_CALL_NO_SIDE_EFFECTS    0x0004
398 /* Helper is QEMU_NORETURN.  */
399 #define TCG_CALL_NO_RETURN          0x0008
400 
401 /* convenience version of most used call flags */
402 #define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
403 #define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
404 #define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
405 #define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
406 #define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)
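/* These flags are usually supplied when a helper is declared; a sketch,
 * assuming the DEF_HELPER_FLAGS_* macros from "exec/helper-head.h" and a
 * hypothetical helper named "example":
 *
 *     DEF_HELPER_FLAGS_2(example, TCG_CALL_NO_RWG_SE, i32, env, i32)
 */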
407 
408 /* Used to align parameters.  See the comment before tcgv_i32_temp.  */
409 #define TCG_CALL_DUMMY_ARG      ((TCGArg)0)
410 
411 /*
412  * Flags for the bswap opcodes.
413  * If IZ, the input is zero-extended, otherwise unknown.
414  * If OZ or OS, the output is zero- or sign-extended respectively,
415  * otherwise the high bits are undefined.
416  */
417 enum {
418     TCG_BSWAP_IZ = 1,
419     TCG_BSWAP_OZ = 2,
420     TCG_BSWAP_OS = 4,
421 };
422 
423 typedef enum TCGTempVal {
424     TEMP_VAL_DEAD,
425     TEMP_VAL_REG,
426     TEMP_VAL_MEM,
427     TEMP_VAL_CONST,
428 } TCGTempVal;
429 
430 typedef enum TCGTempKind {
431     /* Temp is dead at the end of all basic blocks. */
432     TEMP_NORMAL,
433     /* Temp is saved across basic blocks but dead at the end of TBs. */
434     TEMP_LOCAL,
435     /* Temp is saved across both basic blocks and translation blocks. */
436     TEMP_GLOBAL,
437     /* Temp is in a fixed register. */
438     TEMP_FIXED,
439     /* Temp is a fixed constant. */
440     TEMP_CONST,
441 } TCGTempKind;
442 
443 typedef struct TCGTemp {
444     TCGReg reg:8;
445     TCGTempVal val_type:8;
446     TCGType base_type:8;
447     TCGType type:8;
448     TCGTempKind kind:3;
449     unsigned int indirect_reg:1;
450     unsigned int indirect_base:1;
451     unsigned int mem_coherent:1;
452     unsigned int mem_allocated:1;
453     unsigned int temp_allocated:1;
454 
455     int64_t val;
456     struct TCGTemp *mem_base;
457     intptr_t mem_offset;
458     const char *name;
459 
460     /* Pass-specific information that can be stored for a temporary.
461        One word worth of integer data, and one pointer to data
462        allocated separately.  */
463     uintptr_t state;
464     void *state_ptr;
465 } TCGTemp;
466 
467 typedef struct TCGContext TCGContext;
468 
469 typedef struct TCGTempSet {
470     unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
471 } TCGTempSet;
472 
473 /* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
474    this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
475    There are never more than 2 outputs, which means that we can store all
476    dead + sync data within 16 bits.  */
477 #define DEAD_ARG  4
478 #define SYNC_ARG  1
479 typedef uint16_t TCGLifeData;
480 
481 /* The layout here is designed to avoid a bitfield crossing of
482    a 32-bit boundary, which would cause GCC to add extra padding.  */
483 typedef struct TCGOp {
484     TCGOpcode opc   : 8;        /*  8 */
485 
486     /* Parameters for this opcode.  See below.  */
487     unsigned param1 : 4;        /* 12 */
488     unsigned param2 : 4;        /* 16 */
489 
490     /* Lifetime data of the operands.  */
491     unsigned life   : 16;       /* 32 */
492 
493     /* Next and previous opcodes.  */
494     QTAILQ_ENTRY(TCGOp) link;
495 #ifdef CONFIG_PLUGIN
496     QSIMPLEQ_ENTRY(TCGOp) plugin_link;
497 #endif
498 
499     /* Arguments for the opcode.  */
500     TCGArg args[MAX_OPC_PARAM];
501 
502     /* Register preferences for the output(s).  */
503     TCGRegSet output_pref[2];
504 } TCGOp;
505 
506 #define TCGOP_CALLI(X)    (X)->param1
507 #define TCGOP_CALLO(X)    (X)->param2
508 
509 #define TCGOP_VECL(X)     (X)->param1
510 #define TCGOP_VECE(X)     (X)->param2
511 
512 /* Make sure operands fit in the bitfields above.  */
513 QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
514 
515 typedef struct TCGProfile {
516     int64_t cpu_exec_time;
517     int64_t tb_count1;
518     int64_t tb_count;
519     int64_t op_count; /* total insn count */
520     int op_count_max; /* max insn per TB */
521     int temp_count_max;
522     int64_t temp_count;
523     int64_t del_op_count;
524     int64_t code_in_len;
525     int64_t code_out_len;
526     int64_t search_out_len;
527     int64_t interm_time;
528     int64_t code_time;
529     int64_t la_time;
530     int64_t opt_time;
531     int64_t restore_count;
532     int64_t restore_time;
533     int64_t table_op_count[NB_OPS];
534 } TCGProfile;
535 
536 struct TCGContext {
537     uint8_t *pool_cur, *pool_end;
538     TCGPool *pool_first, *pool_current, *pool_first_large;
539     int nb_labels;
540     int nb_globals;
541     int nb_temps;
542     int nb_indirects;
543     int nb_ops;
544 
545     /* goto_tb support */
546     tcg_insn_unit *code_buf;
547     uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
548     uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
549     uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
550 
551     TCGRegSet reserved_regs;
552     uint32_t tb_cflags; /* cflags of the current TB */
553     intptr_t current_frame_offset;
554     intptr_t frame_start;
555     intptr_t frame_end;
556     TCGTemp *frame_temp;
557 
558     tcg_insn_unit *code_ptr;
559 
560 #ifdef CONFIG_PROFILER
561     TCGProfile prof;
562 #endif
563 
564 #ifdef CONFIG_DEBUG_TCG
565     int temps_in_use;
566     int goto_tb_issue_mask;
567     const TCGOpcode *vecop_list;
568 #endif
569 
570     /* Code generation.  Note that we specifically do not use tcg_insn_unit
571        here, because there's too much arithmetic throughout that relies
572        on addition and subtraction working on bytes.  Rely on the GCC
573        extension that allows arithmetic on void*.  */
574     void *code_gen_buffer;
575     size_t code_gen_buffer_size;
576     void *code_gen_ptr;
577     void *data_gen_ptr;
578 
579     /* Threshold to flush the translated code buffer.  */
580     void *code_gen_highwater;
581 
582     /* Track which vCPU triggers events */
583     CPUState *cpu;                      /* *_trans */
584 
585     /* These structures are private to tcg-target.c.inc.  */
586 #ifdef TCG_TARGET_NEED_LDST_LABELS
587     QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
588 #endif
589 #ifdef TCG_TARGET_NEED_POOL_LABELS
590     struct TCGLabelPoolData *pool_labels;
591 #endif
592 
593     TCGLabel *exitreq_label;
594 
595 #ifdef CONFIG_PLUGIN
596     /*
597      * We keep one plugin_tb struct per TCGContext. Note that on every TB
598      * translation we clear but do not free its contents; this way we
599      * avoid a lot of malloc/free churn, since after a few TB's it's
600      * unlikely that we'll need to allocate either more instructions or more
601      * space for instructions (for variable-instruction-length ISAs).
602      */
603     struct qemu_plugin_tb *plugin_tb;
604 
605     /* descriptor of the instruction being translated */
606     struct qemu_plugin_insn *plugin_insn;
607 
608     /* list to quickly access the injected ops */
609     QSIMPLEQ_HEAD(, TCGOp) plugin_ops;
610 #endif
611 
612     GHashTable *const_table[TCG_TYPE_COUNT];
613     TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
614     TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
615 
616     QTAILQ_HEAD(, TCGOp) ops, free_ops;
617     QSIMPLEQ_HEAD(, TCGLabel) labels;
618 
619     /* Tells which temporary holds a given register.
620        It does not take into account fixed registers */
621     TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
622 
623     uint16_t gen_insn_end_off[TCG_MAX_INSNS];
624     target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
625 
626     /* Exit to translator on overflow. */
627     sigjmp_buf jmp_trans;
628 };
629 
630 static inline bool temp_readonly(TCGTemp *ts)
631 {
632     return ts->kind >= TEMP_FIXED;
633 }
634 
635 extern __thread TCGContext *tcg_ctx;
636 extern const void *tcg_code_gen_epilogue;
637 extern uintptr_t tcg_splitwx_diff;
638 extern TCGv_env cpu_env;
639 
640 bool in_code_gen_buffer(const void *p);
641 
642 #ifdef CONFIG_DEBUG_TCG
643 const void *tcg_splitwx_to_rx(void *rw);
644 void *tcg_splitwx_to_rw(const void *rx);
645 #else
646 static inline const void *tcg_splitwx_to_rx(void *rw)
647 {
648     return rw ? rw + tcg_splitwx_diff : NULL;
649 }
650 
651 static inline void *tcg_splitwx_to_rw(const void *rx)
652 {
653     return rx ? (void *)rx - tcg_splitwx_diff : NULL;
654 }
655 #endif
656 
657 static inline size_t temp_idx(TCGTemp *ts)
658 {
659     ptrdiff_t n = ts - tcg_ctx->temps;
660     tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
661     return n;
662 }
663 
664 static inline TCGArg temp_arg(TCGTemp *ts)
665 {
666     return (uintptr_t)ts;
667 }
668 
669 static inline TCGTemp *arg_temp(TCGArg a)
670 {
671     return (TCGTemp *)(uintptr_t)a;
672 }
673 
674 /* Using the offset of a temporary, relative to TCGContext, rather than
675    its index means that we don't use 0.  That leaves offset 0 free for
676    a NULL representation without having to leave index 0 unused.  */
677 static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
678 {
679     uintptr_t o = (uintptr_t)v;
680     TCGTemp *t = (void *)tcg_ctx + o;
681     tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
682     return t;
683 }
684 
685 static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
686 {
687     return tcgv_i32_temp((TCGv_i32)v);
688 }
689 
690 static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
691 {
692     return tcgv_i32_temp((TCGv_i32)v);
693 }
694 
695 static inline TCGTemp *tcgv_vec_temp(TCGv_vec v)
696 {
697     return tcgv_i32_temp((TCGv_i32)v);
698 }
699 
700 static inline TCGArg tcgv_i32_arg(TCGv_i32 v)
701 {
702     return temp_arg(tcgv_i32_temp(v));
703 }
704 
705 static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
706 {
707     return temp_arg(tcgv_i64_temp(v));
708 }
709 
710 static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
711 {
712     return temp_arg(tcgv_ptr_temp(v));
713 }
714 
715 static inline TCGArg tcgv_vec_arg(TCGv_vec v)
716 {
717     return temp_arg(tcgv_vec_temp(v));
718 }
719 
720 static inline TCGv_i32 temp_tcgv_i32(TCGTemp *t)
721 {
722     (void)temp_idx(t); /* trigger embedded assert */
723     return (TCGv_i32)((void *)t - (void *)tcg_ctx);
724 }
725 
726 static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
727 {
728     return (TCGv_i64)temp_tcgv_i32(t);
729 }
730 
731 static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
732 {
733     return (TCGv_ptr)temp_tcgv_i32(t);
734 }
735 
736 static inline TCGv_vec temp_tcgv_vec(TCGTemp *t)
737 {
738     return (TCGv_vec)temp_tcgv_i32(t);
739 }
740 
741 #if TCG_TARGET_REG_BITS == 32
742 static inline TCGv_i32 TCGV_LOW(TCGv_i64 t)
743 {
744     return temp_tcgv_i32(tcgv_i64_temp(t));
745 }
746 
747 static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
748 {
749     return temp_tcgv_i32(tcgv_i64_temp(t) + 1);
750 }
751 #endif
752 
753 static inline TCGArg tcg_get_insn_param(TCGOp *op, int arg)
754 {
755     return op->args[arg];
756 }
757 
758 static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
759 {
760     op->args[arg] = v;
761 }
762 
763 static inline target_ulong tcg_get_insn_start_param(TCGOp *op, int arg)
764 {
765 #if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
766     return tcg_get_insn_param(op, arg);
767 #else
768     return tcg_get_insn_param(op, arg * 2) |
769            ((uint64_t)tcg_get_insn_param(op, arg * 2 + 1) << 32);
770 #endif
771 }
772 
773 static inline void tcg_set_insn_start_param(TCGOp *op, int arg, target_ulong v)
774 {
775 #if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
776     tcg_set_insn_param(op, arg, v);
777 #else
778     tcg_set_insn_param(op, arg * 2, v);
779     tcg_set_insn_param(op, arg * 2 + 1, v >> 32);
780 #endif
781 }
782 
783 /* The last op that was emitted.  */
784 static inline TCGOp *tcg_last_op(void)
785 {
786     return QTAILQ_LAST(&tcg_ctx->ops);
787 }
788 
789 /* Test for whether to terminate the TB for using too many opcodes.  */
790 static inline bool tcg_op_buf_full(void)
791 {
792     /* This is not a hard limit; it merely stops translation when
793      * we have produced "enough" opcodes.  We want to limit TB size
794      * such that a RISC host can reasonably use a 16-bit signed
795      * branch within the TB.  We also need to be mindful of the
796      * 16-bit unsigned offsets, TranslationBlock.jmp_reset_offset[]
797      * and TCGContext.gen_insn_end_off[].
798      */
799     return tcg_ctx->nb_ops >= 4000;
800 }
801 
802 /* pool based memory allocation */
803 
804 /* user-mode: mmap_lock must be held for tcg_malloc_internal. */
805 void *tcg_malloc_internal(TCGContext *s, int size);
806 void tcg_pool_reset(TCGContext *s);
807 TranslationBlock *tcg_tb_alloc(TCGContext *s);
808 
809 void tcg_region_reset_all(void);
810 
811 size_t tcg_code_size(void);
812 size_t tcg_code_capacity(void);
813 
814 void tcg_tb_insert(TranslationBlock *tb);
815 void tcg_tb_remove(TranslationBlock *tb);
816 TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
817 void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
818 size_t tcg_nb_tbs(void);
819 
820 /* user-mode: Called with mmap_lock held.  */
821 static inline void *tcg_malloc(int size)
822 {
823     TCGContext *s = tcg_ctx;
824     uint8_t *ptr, *ptr_end;
825 
826     /* ??? This is a weak placeholder for minimum malloc alignment.  */
827     size = QEMU_ALIGN_UP(size, 8);
828 
829     ptr = s->pool_cur;
830     ptr_end = ptr + size;
831     if (unlikely(ptr_end > s->pool_end)) {
832         return tcg_malloc_internal(tcg_ctx, size);
833     } else {
834         s->pool_cur = ptr_end;
835         return ptr;
836     }
837 }
838 
839 void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
840 void tcg_register_thread(void);
841 void tcg_prologue_init(TCGContext *s);
842 void tcg_func_start(TCGContext *s);
843 
844 int tcg_gen_code(TCGContext *s, TranslationBlock *tb);
845 
846 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
847 
848 TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
849                                      intptr_t, const char *);
850 TCGTemp *tcg_temp_new_internal(TCGType, bool);
851 void tcg_temp_free_internal(TCGTemp *);
852 TCGv_vec tcg_temp_new_vec(TCGType type);
853 TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match);
854 
855 static inline void tcg_temp_free_i32(TCGv_i32 arg)
856 {
857     tcg_temp_free_internal(tcgv_i32_temp(arg));
858 }
859 
860 static inline void tcg_temp_free_i64(TCGv_i64 arg)
861 {
862     tcg_temp_free_internal(tcgv_i64_temp(arg));
863 }
864 
865 static inline void tcg_temp_free_ptr(TCGv_ptr arg)
866 {
867     tcg_temp_free_internal(tcgv_ptr_temp(arg));
868 }
869 
870 static inline void tcg_temp_free_vec(TCGv_vec arg)
871 {
872     tcg_temp_free_internal(tcgv_vec_temp(arg));
873 }
874 
875 static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
876                                               const char *name)
877 {
878     TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
879     return temp_tcgv_i32(t);
880 }
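/*
 * Typical use is to expose a field of the CPU state struct as a named TCG
 * global; a sketch with a hypothetical CPUFooState:
 *
 *     cpu_pc = tcg_global_mem_new_i32(cpu_env,
 *                                     offsetof(CPUFooState, pc), "pc");
 */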
881 
882 static inline TCGv_i32 tcg_temp_new_i32(void)
883 {
884     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, false);
885     return temp_tcgv_i32(t);
886 }
887 
888 static inline TCGv_i32 tcg_temp_local_new_i32(void)
889 {
890     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, true);
891     return temp_tcgv_i32(t);
892 }
893 
894 static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
895                                               const char *name)
896 {
897     TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
898     return temp_tcgv_i64(t);
899 }
900 
901 static inline TCGv_i64 tcg_temp_new_i64(void)
902 {
903     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, false);
904     return temp_tcgv_i64(t);
905 }
906 
907 static inline TCGv_i64 tcg_temp_local_new_i64(void)
908 {
909     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, true);
910     return temp_tcgv_i64(t);
911 }
912 
913 static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
914                                               const char *name)
915 {
916     TCGTemp *t = tcg_global_mem_new_internal(TCG_TYPE_PTR, reg, offset, name);
917     return temp_tcgv_ptr(t);
918 }
919 
920 static inline TCGv_ptr tcg_temp_new_ptr(void)
921 {
922     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, false);
923     return temp_tcgv_ptr(t);
924 }
925 
926 static inline TCGv_ptr tcg_temp_local_new_ptr(void)
927 {
928     TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_PTR, true);
929     return temp_tcgv_ptr(t);
930 }
931 
932 #if defined(CONFIG_DEBUG_TCG)
933 /* If you call tcg_clear_temp_count() at the start of a section of
934  * code which is not supposed to leak any TCG temporaries, then
935  * calling tcg_check_temp_count() at the end of the section will
936  * return 1 if the section did in fact leak a temporary.
937  */
938 void tcg_clear_temp_count(void);
939 int tcg_check_temp_count(void);
940 #else
941 #define tcg_clear_temp_count() do { } while (0)
942 #define tcg_check_temp_count() 0
943 #endif
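/* A sketch of the intended pattern:
 *
 *     tcg_clear_temp_count();
 *     ... emit the ops for one guest instruction ...
 *     if (tcg_check_temp_count()) {
 *         ... report the leaked temporary ...
 *     }
 */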
944 
945 int64_t tcg_cpu_exec_time(void);
946 void tcg_dump_info(void);
947 void tcg_dump_op_count(void);
948 
949 #define TCG_CT_CONST  1 /* any constant of register size */
950 
951 typedef struct TCGArgConstraint {
952     unsigned ct : 16;
953     unsigned alias_index : 4;
954     unsigned sort_index : 4;
955     bool oalias : 1;
956     bool ialias : 1;
957     bool newreg : 1;
958     TCGRegSet regs;
959 } TCGArgConstraint;
960 
961 #define TCG_MAX_OP_ARGS 16
962 
963 /* Bits for TCGOpDef->flags, 8 bits available, all used.  */
964 enum {
965     /* Instruction exits the translation block.  */
966     TCG_OPF_BB_EXIT      = 0x01,
967     /* Instruction defines the end of a basic block.  */
968     TCG_OPF_BB_END       = 0x02,
969     /* Instruction clobbers call registers and potentially update globals.  */
970     TCG_OPF_CALL_CLOBBER = 0x04,
971     /* Instruction has side effects: it cannot be removed if its outputs
972        are not used, and might trigger exceptions.  */
973     TCG_OPF_SIDE_EFFECTS = 0x08,
974     /* Instruction operands are 64-bits (otherwise 32-bits).  */
975     TCG_OPF_64BIT        = 0x10,
976     /* Instruction is optional and not implemented by the host, or insn
977        is generic and should not be implemented by the host.  */
978     TCG_OPF_NOT_PRESENT  = 0x20,
979     /* Instruction operands are vectors.  */
980     TCG_OPF_VECTOR       = 0x40,
981     /* Instruction is a conditional branch. */
982     TCG_OPF_COND_BRANCH  = 0x80
983 };
984 
985 typedef struct TCGOpDef {
986     const char *name;
987     uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
988     uint8_t flags;
989     TCGArgConstraint *args_ct;
990 } TCGOpDef;
991 
992 extern TCGOpDef tcg_op_defs[];
993 extern const size_t tcg_op_defs_max;
994 
995 typedef struct TCGTargetOpDef {
996     TCGOpcode op;
997     const char *args_ct_str[TCG_MAX_OP_ARGS];
998 } TCGTargetOpDef;
999 
1000 #define tcg_abort() \
1001 do {\
1002     fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
1003     abort();\
1004 } while (0)
1005 
1006 bool tcg_op_supported(TCGOpcode op);
1007 
1008 void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
1009 
1010 TCGOp *tcg_emit_op(TCGOpcode opc);
1011 void tcg_op_remove(TCGContext *s, TCGOp *op);
1012 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
1013 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);
1014 
1015 /**
1016  * tcg_remove_ops_after:
1017  * @op: target operation
1018  *
1019  * Discard any opcodes emitted since @op.  Expected usage is to save
1020  * a starting point with tcg_last_op(), speculatively emit opcodes,
1021  * then decide whether or not to keep those opcodes after the fact.
1022  */
1023 void tcg_remove_ops_after(TCGOp *op);
1024 
1025 void tcg_optimize(TCGContext *s);
1026 
1027 /* Allocate a new temporary and initialize it with a constant. */
1028 TCGv_i32 tcg_const_i32(int32_t val);
1029 TCGv_i64 tcg_const_i64(int64_t val);
1030 TCGv_i32 tcg_const_local_i32(int32_t val);
1031 TCGv_i64 tcg_const_local_i64(int64_t val);
1032 TCGv_vec tcg_const_zeros_vec(TCGType);
1033 TCGv_vec tcg_const_ones_vec(TCGType);
1034 TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec);
1035 TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
1036 
1037 /*
1038  * Locate or create a read-only temporary that is a constant.
1039  * This kind of temporary need not be freed, but for convenience
1040  * will be silently ignored by tcg_temp_free_*.
1041  */
1042 TCGTemp *tcg_constant_internal(TCGType type, int64_t val);
1043 
1044 static inline TCGv_i32 tcg_constant_i32(int32_t val)
1045 {
1046     return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val));
1047 }
1048 
1049 static inline TCGv_i64 tcg_constant_i64(int64_t val)
1050 {
1051     return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
1052 }
1053 
1054 TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val);
1055 TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val);
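/*
 * A constant temporary can be used directly as an operand; a sketch
 * (tcg_gen_add_i32 is declared in "tcg/tcg-op.h", "dst" and "src" are
 * hypothetical TCGv_i32 values):
 *
 *     tcg_gen_add_i32(dst, src, tcg_constant_i32(1));
 *
 * No matching tcg_temp_free_i32() call is needed for the constant.
 */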
1056 
1057 #if UINTPTR_MAX == UINT32_MAX
1058 # define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i32((intptr_t)(x)))
1059 # define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i32((intptr_t)(x)))
1060 #else
1061 # define tcg_const_ptr(x)        ((TCGv_ptr)tcg_const_i64((intptr_t)(x)))
1062 # define tcg_const_local_ptr(x)  ((TCGv_ptr)tcg_const_local_i64((intptr_t)(x)))
1063 #endif
1064 
1065 TCGLabel *gen_new_label(void);
1066 
1067 /**
1068  * label_arg
1069  * @l: label
1070  *
1071  * Encode a label for storage in the TCG opcode stream.
1072  */
1073 
1074 static inline TCGArg label_arg(TCGLabel *l)
1075 {
1076     return (uintptr_t)l;
1077 }
1078 
1079 /**
1080  * arg_label
1081  * @i: value
1082  *
1083  * The opposite of label_arg.  Retrieve a label from the
1084  * encoding of the TCG opcode stream.
1085  */
1086 
1087 static inline TCGLabel *arg_label(TCGArg i)
1088 {
1089     return (TCGLabel *)(uintptr_t)i;
1090 }
1091 
1092 /**
1093  * tcg_ptr_byte_diff
1094  * @a, @b: addresses to be differenced
1095  *
1096  * There are many places within the TCG backends where we need a byte
1097  * difference between two pointers.  While this can be accomplished
1098  * with local casting, it's easy to get wrong -- especially if one is
1099  * concerned with the signedness of the result.
1100  *
1101  * This version relies on GCC's void pointer arithmetic to get the
1102  * correct result.
1103  */
1104 
1105 static inline ptrdiff_t tcg_ptr_byte_diff(const void *a, const void *b)
1106 {
1107     return a - b;
1108 }
1109 
1110 /**
1111  * tcg_pcrel_diff
1112  * @s: the tcg context
1113  * @target: address of the target
1114  *
1115  * Produce a pc-relative difference, from the current code_ptr
1116  * to the destination address.
1117  */
1118 
1119 static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target)
1120 {
1121     return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr));
1122 }
1123 
1124 /**
1125  * tcg_tbrel_diff
1126  * @s: the tcg context
1127  * @target: address of the target
1128  *
1129  * Produce a difference, from the beginning of the current TB code
1130  * to the destination address.
1131  */
1132 static inline ptrdiff_t tcg_tbrel_diff(TCGContext *s, const void *target)
1133 {
1134     return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_buf));
1135 }
1136 
1137 /**
1138  * tcg_current_code_size
1139  * @s: the tcg context
1140  *
1141  * Compute the current code size within the translation block.
1142  * This is used to fill in qemu's data structures for goto_tb.
1143  */
1144 
1145 static inline size_t tcg_current_code_size(TCGContext *s)
1146 {
1147     return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
1148 }
1149 
1150 /* Combine the MemOp and mmu_idx parameters into a single value.  */
1151 typedef uint32_t TCGMemOpIdx;
1152 
1153 /**
1154  * make_memop_idx
1155  * @op: memory operation
1156  * @idx: mmu index
1157  *
1158  * Encode these values into a single parameter.
1159  */
1160 static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
1161 {
1162     tcg_debug_assert(idx <= 15);
1163     return (op << 4) | idx;
1164 }
1165 
1166 /**
1167  * get_memop
1168  * @oi: combined op/idx parameter
1169  *
1170  * Extract the memory operation from the combined value.
1171  */
1172 static inline MemOp get_memop(TCGMemOpIdx oi)
1173 {
1174     return oi >> 4;
1175 }
1176 
1177 /**
1178  * get_mmuidx
1179  * @oi: combined op/idx parameter
1180  *
1181  * Extract the mmu index from the combined value.
1182  */
1183 static inline unsigned get_mmuidx(TCGMemOpIdx oi)
1184 {
1185     return oi & 15;
1186 }
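/*
 * For example, the following round-trips (MO_LEUL is defined in
 * "exec/memop.h"):
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_LEUL, 1);
 *     assert(get_memop(oi) == MO_LEUL);
 *     assert(get_mmuidx(oi) == 1);
 */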
1187 
1188 /**
1189  * tcg_qemu_tb_exec:
1190  * @env: pointer to CPUArchState for the CPU
1191  * @tb_ptr: address of generated code for the TB to execute
1192  *
1193  * Start executing code from a given translation block.
1194  * Where translation blocks have been linked, execution
1195  * may proceed from the given TB into successive ones.
1196  * Control eventually returns only when some action is needed
1197  * from the top-level loop: either control must pass to a TB
1198  * which has not yet been directly linked, or an asynchronous
1199  * event such as an interrupt needs handling.
1200  *
1201  * Return: The return value is the value passed to the corresponding
1202  * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
1203  * The value is either zero or a 4-byte aligned pointer to that TB combined
1204  * with additional information in its two least significant bits. The
1205  * additional information is encoded as follows:
1206  *  0, 1: the link between this TB and the next is via the specified
1207  *        TB index (0 or 1). That is, we left the TB via (the equivalent
1208  *        of) "goto_tb <index>". The main loop uses this to determine
1209  *        how to link the TB just executed to the next.
1210  *  2:    we are using instruction counting code generation, and we
1211  *        did not start executing this TB because the instruction counter
1212  *        would hit zero midway through it. In this case the pointer
1213  *        returned is the TB we were about to execute, and the caller must
1214  *        arrange to execute the remaining count of instructions.
1215  *  3:    we stopped because the CPU's exit_request flag was set
1216  *        (usually meaning that there is an interrupt that needs to be
1217  *        handled). The pointer returned is the TB we were about to execute
1218  *        when we noticed the pending exit request.
1219  *
1220  * If the bottom two bits indicate an exit-via-index then the CPU
1221  * state is correctly synchronised and ready for execution of the next
1222  * TB (and in particular the guest PC is the address to execute next).
1223  * Otherwise, we gave up on execution of this TB before it started, and
1224  * the caller must fix up the CPU state by calling the CPU's
1225  * synchronize_from_tb() method with the TB pointer we return (falling
1226  * back to calling the CPU's set_pc method with tb->pc if no
1227  * synchronize_from_tb() method exists).
1228  *
1229  * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
1230  * to this default (which just calls the prologue.code emitted by
1231  * tcg_target_qemu_prologue()).
1232  */
1233 #define TB_EXIT_MASK      3
1234 #define TB_EXIT_IDX0      0
1235 #define TB_EXIT_IDX1      1
1236 #define TB_EXIT_IDXMAX    1
1237 #define TB_EXIT_REQUESTED 3
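/*
 * Callers typically decode the return value along these lines (a sketch of
 * what the cpu-exec loop does; "env" is the CPUArchState pointer):
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *     int tb_exit = ret & TB_EXIT_MASK;
 */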
1238 
1239 #ifdef CONFIG_TCG_INTERPRETER
1240 uintptr_t tcg_qemu_tb_exec(CPUArchState *env, const void *tb_ptr);
1241 #else
1242 typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr);
1243 extern tcg_prologue_fn *tcg_qemu_tb_exec;
1244 #endif
1245 
1246 void tcg_register_jit(const void *buf, size_t buf_size);
1247 
1248 #if TCG_TARGET_MAYBE_vec
1249 /* Return zero if the tuple (opc, type, vece) is unsupportable;
1250    return > 0 if it is directly supportable;
1251    return < 0 if we must call tcg_expand_vec_op.  */
1252 int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
1253 #else
1254 static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
1255 {
1256     return 0;
1257 }
1258 #endif
1259 
1260 /* Expand the tuple (opc, type, vece) on the given arguments.  */
1261 void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);
1262 
1263 /* Replicate a constant C according to the log2 of the element size.  */
1264 uint64_t dup_const(unsigned vece, uint64_t c);
1265 
1266 #define dup_const(VECE, C)                                         \
1267     (__builtin_constant_p(VECE)                                    \
1268      ? (  (VECE) == MO_8  ? 0x0101010101010101ull * (uint8_t)(C)   \
1269         : (VECE) == MO_16 ? 0x0001000100010001ull * (uint16_t)(C)  \
1270         : (VECE) == MO_32 ? 0x0000000100000001ull * (uint32_t)(C)  \
1271         : (VECE) == MO_64 ? (uint64_t)(C)                          \
1272         : (qemu_build_not_reached_always(), 0))                    \
1273      : dup_const(VECE, C))
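/* For example, dup_const(MO_16, 0x1234) yields 0x1234123412341234ull.  */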
1274 
1275 /*
1276  * Memory helpers that will be used by TCG generated code.
1277  */
1278 #ifdef CONFIG_SOFTMMU
1279 /* Value zero-extended to tcg register size.  */
1280 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1281                                      TCGMemOpIdx oi, uintptr_t retaddr);
1282 tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1283                                     TCGMemOpIdx oi, uintptr_t retaddr);
1284 tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1285                                     TCGMemOpIdx oi, uintptr_t retaddr);
1286 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1287                            TCGMemOpIdx oi, uintptr_t retaddr);
1288 tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1289                                     TCGMemOpIdx oi, uintptr_t retaddr);
1290 tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1291                                     TCGMemOpIdx oi, uintptr_t retaddr);
1292 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1293                            TCGMemOpIdx oi, uintptr_t retaddr);
1294 
1295 /* Value sign-extended to tcg register size.  */
1296 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1297                                      TCGMemOpIdx oi, uintptr_t retaddr);
1298 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1299                                     TCGMemOpIdx oi, uintptr_t retaddr);
1300 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1301                                     TCGMemOpIdx oi, uintptr_t retaddr);
1302 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1303                                     TCGMemOpIdx oi, uintptr_t retaddr);
1304 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1305                                     TCGMemOpIdx oi, uintptr_t retaddr);
1306 
1307 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1308                         TCGMemOpIdx oi, uintptr_t retaddr);
1309 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1310                        TCGMemOpIdx oi, uintptr_t retaddr);
1311 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1312                        TCGMemOpIdx oi, uintptr_t retaddr);
1313 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1314                        TCGMemOpIdx oi, uintptr_t retaddr);
1315 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1316                        TCGMemOpIdx oi, uintptr_t retaddr);
1317 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1318                        TCGMemOpIdx oi, uintptr_t retaddr);
1319 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1320                        TCGMemOpIdx oi, uintptr_t retaddr);
1321 
1322 /* Temporary aliases until backends are converted.  */
1323 #ifdef TARGET_WORDS_BIGENDIAN
1324 # define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
1325 # define helper_ret_lduw_mmu  helper_be_lduw_mmu
1326 # define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
1327 # define helper_ret_ldul_mmu  helper_be_ldul_mmu
1328 # define helper_ret_ldl_mmu   helper_be_ldul_mmu
1329 # define helper_ret_ldq_mmu   helper_be_ldq_mmu
1330 # define helper_ret_stw_mmu   helper_be_stw_mmu
1331 # define helper_ret_stl_mmu   helper_be_stl_mmu
1332 # define helper_ret_stq_mmu   helper_be_stq_mmu
1333 #else
1334 # define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
1335 # define helper_ret_lduw_mmu  helper_le_lduw_mmu
1336 # define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
1337 # define helper_ret_ldul_mmu  helper_le_ldul_mmu
1338 # define helper_ret_ldl_mmu   helper_le_ldul_mmu
1339 # define helper_ret_ldq_mmu   helper_le_ldq_mmu
1340 # define helper_ret_stw_mmu   helper_le_stw_mmu
1341 # define helper_ret_stl_mmu   helper_le_stl_mmu
1342 # define helper_ret_stq_mmu   helper_le_stq_mmu
1343 #endif
1344 #endif /* CONFIG_SOFTMMU */
1345 
1346 uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
1347                                  uint32_t cmpv, uint32_t newv,
1348                                  TCGMemOpIdx oi, uintptr_t retaddr);
1349 uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
1350                                     uint32_t cmpv, uint32_t newv,
1351                                     TCGMemOpIdx oi, uintptr_t retaddr);
1352 uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
1353                                     uint32_t cmpv, uint32_t newv,
1354                                     TCGMemOpIdx oi, uintptr_t retaddr);
1355 uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
1356                                     uint64_t cmpv, uint64_t newv,
1357                                     TCGMemOpIdx oi, uintptr_t retaddr);
1358 uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
1359                                     uint32_t cmpv, uint32_t newv,
1360                                     TCGMemOpIdx oi, uintptr_t retaddr);
1361 uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
1362                                     uint32_t cmpv, uint32_t newv,
1363                                     TCGMemOpIdx oi, uintptr_t retaddr);
1364 uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
1365                                     uint64_t cmpv, uint64_t newv,
1366                                     TCGMemOpIdx oi, uintptr_t retaddr);
1367 
1368 #define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
1369 TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu            \
1370     (CPUArchState *env, target_ulong addr, TYPE val,  \
1371      TCGMemOpIdx oi, uintptr_t retaddr);
1372 
1373 #ifdef CONFIG_ATOMIC64
1374 #define GEN_ATOMIC_HELPER_ALL(NAME)          \
1375     GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
1376     GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
1377     GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
1378     GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
1379     GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
1380     GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
1381     GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
1382 #else
1383 #define GEN_ATOMIC_HELPER_ALL(NAME)          \
1384     GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
1385     GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
1386     GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
1387     GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
1388     GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
1389 #endif
1390 
1391 GEN_ATOMIC_HELPER_ALL(fetch_add)
1392 GEN_ATOMIC_HELPER_ALL(fetch_sub)
1393 GEN_ATOMIC_HELPER_ALL(fetch_and)
1394 GEN_ATOMIC_HELPER_ALL(fetch_or)
1395 GEN_ATOMIC_HELPER_ALL(fetch_xor)
1396 GEN_ATOMIC_HELPER_ALL(fetch_smin)
1397 GEN_ATOMIC_HELPER_ALL(fetch_umin)
1398 GEN_ATOMIC_HELPER_ALL(fetch_smax)
1399 GEN_ATOMIC_HELPER_ALL(fetch_umax)
1400 
1401 GEN_ATOMIC_HELPER_ALL(add_fetch)
1402 GEN_ATOMIC_HELPER_ALL(sub_fetch)
1403 GEN_ATOMIC_HELPER_ALL(and_fetch)
1404 GEN_ATOMIC_HELPER_ALL(or_fetch)
1405 GEN_ATOMIC_HELPER_ALL(xor_fetch)
1406 GEN_ATOMIC_HELPER_ALL(smin_fetch)
1407 GEN_ATOMIC_HELPER_ALL(umin_fetch)
1408 GEN_ATOMIC_HELPER_ALL(smax_fetch)
1409 GEN_ATOMIC_HELPER_ALL(umax_fetch)
1410 
1411 GEN_ATOMIC_HELPER_ALL(xchg)
1412 
1413 #undef GEN_ATOMIC_HELPER_ALL
1414 #undef GEN_ATOMIC_HELPER
1415 
1416 Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
1417                                   Int128 cmpv, Int128 newv,
1418                                   TCGMemOpIdx oi, uintptr_t retaddr);
1419 Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
1420                                   Int128 cmpv, Int128 newv,
1421                                   TCGMemOpIdx oi, uintptr_t retaddr);
1422 
1423 Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
1424                              TCGMemOpIdx oi, uintptr_t retaddr);
1425 Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
1426                              TCGMemOpIdx oi, uintptr_t retaddr);
1427 void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
1428                            TCGMemOpIdx oi, uintptr_t retaddr);
1429 void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
1430                            TCGMemOpIdx oi, uintptr_t retaddr);
1431 
1432 #ifdef CONFIG_DEBUG_TCG
1433 void tcg_assert_listed_vecop(TCGOpcode);
1434 #else
1435 static inline void tcg_assert_listed_vecop(TCGOpcode op) { }
1436 #endif
1437 
1438 static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
1439 {
1440 #ifdef CONFIG_DEBUG_TCG
1441     const TCGOpcode *o = tcg_ctx->vecop_list;
1442     tcg_ctx->vecop_list = n;
1443     return o;
1444 #else
1445     return NULL;
1446 #endif
1447 }
1448 
1449 bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);
1450 
1451 #endif /* TCG_H */
1452