xref: /qemu/tcg/tcg.c (revision 45b1f68c)
1 /*
2  * Tiny Code Generator for QEMU
3  *
4  * Copyright (c) 2008 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 /* define it to use liveness analysis (better code) */
26 #define USE_TCG_OPTIMIZATIONS
27 
28 #include "qemu/osdep.h"
29 
30 /* Define to dump the ELF file used to communicate with GDB.  */
31 #undef DEBUG_JIT
32 
33 #include "qemu/error-report.h"
34 #include "qemu/cutils.h"
35 #include "qemu/host-utils.h"
36 #include "qemu/qemu-print.h"
37 #include "qemu/timer.h"
38 
39 /* Note: the long-term plan is to reduce the dependencies on the QEMU
40    CPU definitions. Currently they are used for qemu_ld/st
41    instructions.  */
42 #define NO_CPU_IO_DEFS
43 #include "cpu.h"
44 
45 #include "exec/cpu-common.h"
46 #include "exec/exec-all.h"
47 
48 #include "tcg-op.h"
49 
50 #if UINTPTR_MAX == UINT32_MAX
51 # define ELF_CLASS  ELFCLASS32
52 #else
53 # define ELF_CLASS  ELFCLASS64
54 #endif
55 #ifdef HOST_WORDS_BIGENDIAN
56 # define ELF_DATA   ELFDATA2MSB
57 #else
58 # define ELF_DATA   ELFDATA2LSB
59 #endif
60 
61 #include "elf.h"
62 #include "exec/log.h"
63 #include "sysemu/sysemu.h"
64 
65 /* Forward declarations for functions declared in tcg-target.inc.c and
66    used here. */
67 static void tcg_target_init(TCGContext *s);
68 static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
69 static void tcg_target_qemu_prologue(TCGContext *s);
70 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
71                         intptr_t value, intptr_t addend);
72 
73 /* The CIE and FDE header definitions will be common to all hosts.  */
74 typedef struct {
75     uint32_t len __attribute__((aligned((sizeof(void *)))));
76     uint32_t id;
77     uint8_t version;
78     char augmentation[1];
79     uint8_t code_align;
80     uint8_t data_align;
81     uint8_t return_column;
82 } DebugFrameCIE;
83 
84 typedef struct QEMU_PACKED {
85     uint32_t len __attribute__((aligned((sizeof(void *)))));
86     uint32_t cie_offset;
87     uintptr_t func_start;
88     uintptr_t func_len;
89 } DebugFrameFDEHeader;
90 
91 typedef struct QEMU_PACKED {
92     DebugFrameCIE cie;
93     DebugFrameFDEHeader fde;
94 } DebugFrameHeader;
95 
96 static void tcg_register_jit_int(void *buf, size_t size,
97                                  const void *debug_frame,
98                                  size_t debug_frame_size)
99     __attribute__((unused));
100 
101 /* Forward declarations for functions declared and used in tcg-target.inc.c. */
102 static const char *target_parse_constraint(TCGArgConstraint *ct,
103                                            const char *ct_str, TCGType type);
104 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
105                        intptr_t arg2);
106 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
107 static void tcg_out_movi(TCGContext *s, TCGType type,
108                          TCGReg ret, tcg_target_long arg);
109 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
110                        const int *const_args);
111 #if TCG_TARGET_MAYBE_vec
112 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
113                            unsigned vece, const TCGArg *args,
114                            const int *const_args);
115 #else
116 static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
117                                   unsigned vece, const TCGArg *args,
118                                   const int *const_args)
119 {
120     g_assert_not_reached();
121 }
122 #endif
123 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
124                        intptr_t arg2);
125 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
126                         TCGReg base, intptr_t ofs);
127 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
128 static int tcg_target_const_match(tcg_target_long val, TCGType type,
129                                   const TCGArgConstraint *arg_ct);
130 #ifdef TCG_TARGET_NEED_LDST_LABELS
131 static int tcg_out_ldst_finalize(TCGContext *s);
132 #endif
133 
134 #define TCG_HIGHWATER 1024
135 
136 static TCGContext **tcg_ctxs;
137 static unsigned int n_tcg_ctxs;
138 TCGv_env cpu_env = 0;
139 
140 struct tcg_region_tree {
141     QemuMutex lock;
142     GTree *tree;
143     /* padding to avoid false sharing is computed at run-time */
144 };
145 
146 /*
147  * We divide code_gen_buffer into equally-sized "regions" that TCG threads
148  * dynamically allocate from as demand dictates. Given appropriate region
149  * sizing, this minimizes flushes even when some TCG threads generate a lot
150  * more code than others.
151  */
152 struct tcg_region_state {
153     QemuMutex lock;
154 
155     /* fields set at init time */
156     void *start;
157     void *start_aligned;
158     void *end;
159     size_t n;
160     size_t size; /* size of one region */
161     size_t stride; /* .size + guard size */
162 
163     /* fields protected by the lock */
164     size_t current; /* current region index */
165     size_t agg_size_full; /* aggregate size of full regions */
166 };
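/*
 * Illustrative layout (editor's sketch, not from the original source), for
 * a hypothetical n = 3:
 *
 *   start   start_aligned
 *     |       |
 *     v       v
 *     +-------+--------+-------+--------+-------+--------+-------+
 *     |  pre  |  code  | guard |  code  | guard |  code  | guard |
 *     +-------+--------+-------+--------+-------+--------+-------+
 *             |<---- stride -->|
 *             |<-size->|
 *
 * Region i begins at start_aligned + i * stride, except that region 0 also
 * absorbs the unaligned prefix at start, and the last region absorbs any
 * leftover pages before end.
 */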
167 
168 static struct tcg_region_state region;
169 /*
170  * This is an array of struct tcg_region_tree's, with padding.
171  * We use void * to simplify the computation of region_trees[i]; each
172  * struct is found every tree_size bytes.
173  */
174 static void *region_trees;
175 static size_t tree_size;
176 static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
177 static TCGRegSet tcg_target_call_clobber_regs;
178 
179 #if TCG_TARGET_INSN_UNIT_SIZE == 1
180 static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
181 {
182     *s->code_ptr++ = v;
183 }
184 
185 static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
186                                                       uint8_t v)
187 {
188     *p = v;
189 }
190 #endif
191 
192 #if TCG_TARGET_INSN_UNIT_SIZE <= 2
193 static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
194 {
195     if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
196         *s->code_ptr++ = v;
197     } else {
198         tcg_insn_unit *p = s->code_ptr;
199         memcpy(p, &v, sizeof(v));
200         s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
201     }
202 }
203 
204 static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
205                                                        uint16_t v)
206 {
207     if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
208         *p = v;
209     } else {
210         memcpy(p, &v, sizeof(v));
211     }
212 }
213 #endif
214 
215 #if TCG_TARGET_INSN_UNIT_SIZE <= 4
216 static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
217 {
218     if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
219         *s->code_ptr++ = v;
220     } else {
221         tcg_insn_unit *p = s->code_ptr;
222         memcpy(p, &v, sizeof(v));
223         s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
224     }
225 }
226 
227 static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
228                                                        uint32_t v)
229 {
230     if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
231         *p = v;
232     } else {
233         memcpy(p, &v, sizeof(v));
234     }
235 }
236 #endif
237 
238 #if TCG_TARGET_INSN_UNIT_SIZE <= 8
239 static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
240 {
241     if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
242         *s->code_ptr++ = v;
243     } else {
244         tcg_insn_unit *p = s->code_ptr;
245         memcpy(p, &v, sizeof(v));
246         s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
247     }
248 }
249 
250 static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
251                                                        uint64_t v)
252 {
253     if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
254         *p = v;
255     } else {
256         memcpy(p, &v, sizeof(v));
257     }
258 }
259 #endif
260 
261 /* label relocation processing */
262 
263 static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
264                           TCGLabel *l, intptr_t addend)
265 {
266     TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));
267 
268     r->type = type;
269     r->ptr = code_ptr;
270     r->addend = addend;
271     QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
272 }
273 
274 static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
275 {
276     tcg_debug_assert(!l->has_value);
277     l->has_value = 1;
278     l->u.value_ptr = ptr;
279 }
280 
281 TCGLabel *gen_new_label(void)
282 {
283     TCGContext *s = tcg_ctx;
284     TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
285 
286     memset(l, 0, sizeof(TCGLabel));
287     l->id = s->nb_labels++;
288     QSIMPLEQ_INIT(&l->relocs);
289 
290     QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);
291 
292     return l;
293 }
294 
295 static bool tcg_resolve_relocs(TCGContext *s)
296 {
297     TCGLabel *l;
298 
299     QSIMPLEQ_FOREACH(l, &s->labels, next) {
300         TCGRelocation *r;
301         uintptr_t value = l->u.value;
302 
303         QSIMPLEQ_FOREACH(r, &l->relocs, next) {
304             if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
305                 return false;
306             }
307         }
308     }
309     return true;
310 }
311 
312 static void set_jmp_reset_offset(TCGContext *s, int which)
313 {
314     size_t off = tcg_current_code_size(s);
315     s->tb_jmp_reset_offset[which] = off;
316     /* Make sure that we didn't overflow the stored offset.  */
317     assert(s->tb_jmp_reset_offset[which] == off);
318 }
319 
320 #include "tcg-target.inc.c"
321 
322 /* compare a pointer @ptr and a tb_tc @s */
323 static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
324 {
325     if (ptr >= s->ptr + s->size) {
326         return 1;
327     } else if (ptr < s->ptr) {
328         return -1;
329     }
330     return 0;
331 }
332 
333 static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
334 {
335     const struct tb_tc *a = ap;
336     const struct tb_tc *b = bp;
337 
338     /*
339      * When both sizes are set, we know this isn't a lookup.
340      * This is the most likely case: every TB must be inserted; lookups
341      * are a lot less frequent.
342      */
343     if (likely(a->size && b->size)) {
344         if (a->ptr > b->ptr) {
345             return 1;
346         } else if (a->ptr < b->ptr) {
347             return -1;
348         }
349         /* a->ptr == b->ptr should happen only on deletions */
350         g_assert(a->size == b->size);
351         return 0;
352     }
353     /*
354      * In a lookup, the key's .size field is set to 0.
355      * From the glib sources we see that @ap is always the lookup key. However
356      * the docs provide no guarantee, so we just mark this case as likely.
357      */
358     if (likely(a->size == 0)) {
359         return ptr_cmp_tb_tc(a->ptr, b);
360     }
361     return ptr_cmp_tb_tc(b->ptr, a);
362 }
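/*
 * A worked example of the comparator above (editor's note, with hypothetical
 * addresses): a lookup key { .ptr = p, .size = 0 } compares equal to the
 * tb_tc whose half-open interval [ptr, ptr + size) contains p, which is
 * exactly what tcg_tb_lookup() below relies on.
 */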
363 
364 static void tcg_region_trees_init(void)
365 {
366     size_t i;
367 
368     tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
369     region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
370     for (i = 0; i < region.n; i++) {
371         struct tcg_region_tree *rt = region_trees + i * tree_size;
372 
373         qemu_mutex_init(&rt->lock);
374         rt->tree = g_tree_new(tb_tc_cmp);
375     }
376 }
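/*
 * Editor's note: rounding tree_size up to the d-cache line size gives each
 * tcg_region_tree its own cache line(s), so a thread taking one rt->lock
 * does not bounce the line holding a neighbouring region's lock. E.g. with
 * a (hypothetical) 64-byte line and a 48-byte struct, tree_size becomes 64.
 */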
377 
378 static struct tcg_region_tree *tc_ptr_to_region_tree(void *p)
379 {
380     size_t region_idx;
381 
382     if (p < region.start_aligned) {
383         region_idx = 0;
384     } else {
385         ptrdiff_t offset = p - region.start_aligned;
386 
387         if (offset > region.stride * (region.n - 1)) {
388             region_idx = region.n - 1;
389         } else {
390             region_idx = offset / region.stride;
391         }
392     }
393     return region_trees + region_idx * tree_size;
394 }
395 
396 void tcg_tb_insert(TranslationBlock *tb)
397 {
398     struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
399 
400     qemu_mutex_lock(&rt->lock);
401     g_tree_insert(rt->tree, &tb->tc, tb);
402     qemu_mutex_unlock(&rt->lock);
403 }
404 
405 void tcg_tb_remove(TranslationBlock *tb)
406 {
407     struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
408 
409     qemu_mutex_lock(&rt->lock);
410     g_tree_remove(rt->tree, &tb->tc);
411     qemu_mutex_unlock(&rt->lock);
412 }
413 
414 /*
415  * Find the TB 'tb' such that
416  * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
417  * Return NULL if not found.
418  */
419 TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
420 {
421     struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
422     TranslationBlock *tb;
423     struct tb_tc s = { .ptr = (void *)tc_ptr };
424 
425     qemu_mutex_lock(&rt->lock);
426     tb = g_tree_lookup(rt->tree, &s);
427     qemu_mutex_unlock(&rt->lock);
428     return tb;
429 }
430 
431 static void tcg_region_tree_lock_all(void)
432 {
433     size_t i;
434 
435     for (i = 0; i < region.n; i++) {
436         struct tcg_region_tree *rt = region_trees + i * tree_size;
437 
438         qemu_mutex_lock(&rt->lock);
439     }
440 }
441 
442 static void tcg_region_tree_unlock_all(void)
443 {
444     size_t i;
445 
446     for (i = 0; i < region.n; i++) {
447         struct tcg_region_tree *rt = region_trees + i * tree_size;
448 
449         qemu_mutex_unlock(&rt->lock);
450     }
451 }
452 
453 void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
454 {
455     size_t i;
456 
457     tcg_region_tree_lock_all();
458     for (i = 0; i < region.n; i++) {
459         struct tcg_region_tree *rt = region_trees + i * tree_size;
460 
461         g_tree_foreach(rt->tree, func, user_data);
462     }
463     tcg_region_tree_unlock_all();
464 }
465 
466 size_t tcg_nb_tbs(void)
467 {
468     size_t nb_tbs = 0;
469     size_t i;
470 
471     tcg_region_tree_lock_all();
472     for (i = 0; i < region.n; i++) {
473         struct tcg_region_tree *rt = region_trees + i * tree_size;
474 
475         nb_tbs += g_tree_nnodes(rt->tree);
476     }
477     tcg_region_tree_unlock_all();
478     return nb_tbs;
479 }
480 
481 static void tcg_region_tree_reset_all(void)
482 {
483     size_t i;
484 
485     tcg_region_tree_lock_all();
486     for (i = 0; i < region.n; i++) {
487         struct tcg_region_tree *rt = region_trees + i * tree_size;
488 
489         /* Increment the refcount first so that destroy acts as a reset */
490         g_tree_ref(rt->tree);
491         g_tree_destroy(rt->tree);
492     }
493     tcg_region_tree_unlock_all();
494 }
495 
496 static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
497 {
498     void *start, *end;
499 
500     start = region.start_aligned + curr_region * region.stride;
501     end = start + region.size;
502 
503     if (curr_region == 0) {
504         start = region.start;
505     }
506     if (curr_region == region.n - 1) {
507         end = region.end;
508     }
509 
510     *pstart = start;
511     *pend = end;
512 }
513 
514 static void tcg_region_assign(TCGContext *s, size_t curr_region)
515 {
516     void *start, *end;
517 
518     tcg_region_bounds(curr_region, &start, &end);
519 
520     s->code_gen_buffer = start;
521     s->code_gen_ptr = start;
522     s->code_gen_buffer_size = end - start;
523     s->code_gen_highwater = end - TCG_HIGHWATER;
524 }
525 
526 static bool tcg_region_alloc__locked(TCGContext *s)
527 {
528     if (region.current == region.n) {
529         return true;
530     }
531     tcg_region_assign(s, region.current);
532     region.current++;
533     return false;
534 }
535 
536 /*
537  * Request a new region once the one in use has filled up.
538  * Returns true on error.
539  */
540 static bool tcg_region_alloc(TCGContext *s)
541 {
542     bool err;
543     /* read the region size now; alloc__locked will overwrite it on success */
544     size_t size_full = s->code_gen_buffer_size;
545 
546     qemu_mutex_lock(&region.lock);
547     err = tcg_region_alloc__locked(s);
548     if (!err) {
549         region.agg_size_full += size_full - TCG_HIGHWATER;
550     }
551     qemu_mutex_unlock(&region.lock);
552     return err;
553 }
554 
555 /*
556  * Perform a context's first region allocation.
557  * This function does _not_ increment region.agg_size_full.
558  */
559 static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
560 {
561     return tcg_region_alloc__locked(s);
562 }
563 
564 /* Call from a safe-work context */
565 void tcg_region_reset_all(void)
566 {
567     unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
568     unsigned int i;
569 
570     qemu_mutex_lock(&region.lock);
571     region.current = 0;
572     region.agg_size_full = 0;
573 
574     for (i = 0; i < n_ctxs; i++) {
575         TCGContext *s = atomic_read(&tcg_ctxs[i]);
576         bool err = tcg_region_initial_alloc__locked(s);
577 
578         g_assert(!err);
579     }
580     qemu_mutex_unlock(&region.lock);
581 
582     tcg_region_tree_reset_all();
583 }
584 
585 #ifdef CONFIG_USER_ONLY
586 static size_t tcg_n_regions(void)
587 {
588     return 1;
589 }
590 #else
591 /*
592  * It is likely that some vCPUs will translate more code than others, so we
593  * first try to set more regions than max_cpus, with those regions being of
594  * reasonable size. If that's not possible we make do by evenly dividing
595  * the code_gen_buffer among the vCPUs.
596  */
597 static size_t tcg_n_regions(void)
598 {
599     size_t i;
600 
601     /* Use a single region if all we have is one vCPU thread */
602     if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
603         return 1;
604     }
605 
606     /* Try to have more regions than max_cpus, with each region being >= 2 MB */
607     for (i = 8; i > 0; i--) {
608         size_t regions_per_thread = i;
609         size_t region_size;
610 
611         region_size = tcg_init_ctx.code_gen_buffer_size;
612         region_size /= max_cpus * regions_per_thread;
613 
614         if (region_size >= 2 * 1024u * 1024) {
615             return max_cpus * regions_per_thread;
616         }
617     }
618     /* If we can't, then just allocate one region per vCPU thread */
619     return max_cpus;
620 }
621 #endif
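/*
 * Worked example (editor's sketch with hypothetical numbers): given a
 * 256 MB code_gen_buffer and max_cpus = 8, the first loop iteration tries
 * i = 8, i.e. 64 regions of 256 MB / 64 = 4 MB each; 4 MB >= 2 MB, so
 * tcg_n_regions() returns 64.
 */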
622 
623 /*
624  * Initializes region partitioning.
625  *
626  * Called at init time from the parent thread (i.e. the one calling
627  * tcg_context_init), after the target's TCG globals have been set.
628  *
629  * Region partitioning works by splitting code_gen_buffer into separate regions,
630  * and then assigning regions to TCG threads so that the threads can translate
631  * code in parallel without synchronization.
632  *
633  * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
634  * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
635  * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
636  * must have been parsed before calling this function, since it calls
637  * qemu_tcg_mttcg_enabled().
638  *
639  * In user-mode we use a single region.  Having multiple regions in user-mode
640  * is not supported, because the number of vCPU threads (recall that each thread
641  * spawned by the guest corresponds to a vCPU thread) is only bounded by the
642  * OS, and usually this number is huge (tens of thousands is not uncommon).
643  * Thus, given this large bound on the number of vCPU threads and the fact
644  * that code_gen_buffer is allocated at compile-time, we cannot guarantee
645  * the availability of at least one region per vCPU thread.
646  *
647  * However, this user-mode limitation is unlikely to be a significant problem
648  * in practice. Multi-threaded guests share most if not all of their translated
649  * code, which makes parallel code generation less appealing than in softmmu.
650  */
651 void tcg_region_init(void)
652 {
653     void *buf = tcg_init_ctx.code_gen_buffer;
654     void *aligned;
655     size_t size = tcg_init_ctx.code_gen_buffer_size;
656     size_t page_size = qemu_real_host_page_size;
657     size_t region_size;
658     size_t n_regions;
659     size_t i;
660 
661     n_regions = tcg_n_regions();
662 
663     /* The first region will be 'aligned - buf' bytes larger than the others */
664     aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
665     g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
666     /*
667      * Make region_size a multiple of page_size, using aligned as the start.
668      * As a result of this we might end up with a few extra pages at the end of
669      * the buffer; we will assign those to the last region.
670      */
671     region_size = (size - (aligned - buf)) / n_regions;
672     region_size = QEMU_ALIGN_DOWN(region_size, page_size);
673 
674     /* A region must have at least 2 pages; one code, one guard */
675     g_assert(region_size >= 2 * page_size);
676 
677     /* init the region struct */
678     qemu_mutex_init(&region.lock);
679     region.n = n_regions;
680     region.size = region_size - page_size;
681     region.stride = region_size;
682     region.start = buf;
683     region.start_aligned = aligned;
684     /* page-align the end, since its last page will be a guard page */
685     region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
686     /* account for that last guard page */
687     region.end -= page_size;
688 
689     /* set guard pages */
690     for (i = 0; i < region.n; i++) {
691         void *start, *end;
692         int rc;
693 
694         tcg_region_bounds(i, &start, &end);
695         rc = qemu_mprotect_none(end, page_size);
696         g_assert(!rc);
697     }
698 
699     tcg_region_trees_init();
700 
701     /* In user-mode we support only one ctx, so do the initial allocation now */
702 #ifdef CONFIG_USER_ONLY
703     {
704         bool err = tcg_region_initial_alloc__locked(tcg_ctx);
705 
706         g_assert(!err);
707     }
708 #endif
709 }
710 
711 /*
712  * All TCG threads except the parent (i.e. the one that called tcg_context_init
713  * and registered the target's TCG globals) must register with this function
714  * before initiating translation.
715  *
716  * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
717  * of tcg_region_init() for the reasoning behind this.
718  *
719  * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
720  * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
721  * is not used anymore for translation once this function is called.
722  *
723  * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
724  * over the array (e.g. tcg_code_size()) the same for both softmmu and user-mode.
725  */
726 #ifdef CONFIG_USER_ONLY
727 void tcg_register_thread(void)
728 {
729     tcg_ctx = &tcg_init_ctx;
730 }
731 #else
732 void tcg_register_thread(void)
733 {
734     TCGContext *s = g_malloc(sizeof(*s));
735     unsigned int i, n;
736     bool err;
737 
738     *s = tcg_init_ctx;
739 
740     /* Relink mem_base.  */
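    /*
     * Editor's note: the struct copy above duplicated each global's mem_base
     * pointer, which still points into tcg_init_ctx.temps[]. Rebase it to
     * the temp at the same index within this context's temps[] array.
     */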
741     for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
742         if (tcg_init_ctx.temps[i].mem_base) {
743             ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
744             tcg_debug_assert(b >= 0 && b < n);
745             s->temps[i].mem_base = &s->temps[b];
746         }
747     }
748 
749     /* Claim an entry in tcg_ctxs */
750     n = atomic_fetch_inc(&n_tcg_ctxs);
751     g_assert(n < max_cpus);
752     atomic_set(&tcg_ctxs[n], s);
753 
754     tcg_ctx = s;
755     qemu_mutex_lock(&region.lock);
756     err = tcg_region_initial_alloc__locked(tcg_ctx);
757     g_assert(!err);
758     qemu_mutex_unlock(&region.lock);
759 }
760 #endif /* !CONFIG_USER_ONLY */
761 
762 /*
763  * Returns the size (in bytes) of all translated code (i.e. from all regions)
764  * currently in the cache.
765  * See also: tcg_code_capacity()
766  * Do not confuse with tcg_current_code_size(); that one applies to a single
767  * TCG context.
768  */
769 size_t tcg_code_size(void)
770 {
771     unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
772     unsigned int i;
773     size_t total;
774 
775     qemu_mutex_lock(&region.lock);
776     total = region.agg_size_full;
777     for (i = 0; i < n_ctxs; i++) {
778         const TCGContext *s = atomic_read(&tcg_ctxs[i]);
779         size_t size;
780 
781         size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
782         g_assert(size <= s->code_gen_buffer_size);
783         total += size;
784     }
785     qemu_mutex_unlock(&region.lock);
786     return total;
787 }
788 
789 /*
790  * Returns the code capacity (in bytes) of the entire cache, i.e. including all
791  * regions.
792  * See also: tcg_code_size()
793  */
794 size_t tcg_code_capacity(void)
795 {
796     size_t guard_size, capacity;
797 
798     /* no need for synchronization; these variables are set at init time */
799     guard_size = region.stride - region.size;
800     capacity = region.end + guard_size - region.start;
801     capacity -= region.n * (guard_size + TCG_HIGHWATER);
802     return capacity;
803 }
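/*
 * Editor's note on the arithmetic above: region.end was computed without
 * the final guard page, so adding guard_size back yields the whole span
 * from region.start to the page-aligned end of the buffer; from that we
 * subtract, per region, its guard page plus the TCG_HIGHWATER slack that
 * tcg_region_alloc() never hands out.
 */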
804 
805 size_t tcg_tb_phys_invalidate_count(void)
806 {
807     unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
808     unsigned int i;
809     size_t total = 0;
810 
811     for (i = 0; i < n_ctxs; i++) {
812         const TCGContext *s = atomic_read(&tcg_ctxs[i]);
813 
814         total += atomic_read(&s->tb_phys_invalidate_count);
815     }
816     return total;
817 }
818 
819 /* pool-based memory allocation */
820 void *tcg_malloc_internal(TCGContext *s, int size)
821 {
822     TCGPool *p;
823     int pool_size;
824 
825     if (size > TCG_POOL_CHUNK_SIZE) {
826         /* big malloc: insert a new pool (XXX: could optimize) */
827         p = g_malloc(sizeof(TCGPool) + size);
828         p->size = size;
829         p->next = s->pool_first_large;
830         s->pool_first_large = p;
831         return p->data;
832     } else {
833         p = s->pool_current;
834         if (!p) {
835             p = s->pool_first;
836             if (!p)
837                 goto new_pool;
838         } else {
839             if (!p->next) {
840             new_pool:
841                 pool_size = TCG_POOL_CHUNK_SIZE;
842                 p = g_malloc(sizeof(TCGPool) + pool_size);
843                 p->size = pool_size;
844                 p->next = NULL;
845                 if (s->pool_current)
846                     s->pool_current->next = p;
847                 else
848                     s->pool_first = p;
849             } else {
850                 p = p->next;
851             }
852         }
853     }
854     s->pool_current = p;
855     s->pool_cur = p->data + size;
856     s->pool_end = p->data + p->size;
857     return p->data;
858 }
859 
860 void tcg_pool_reset(TCGContext *s)
861 {
862     TCGPool *p, *t;
863     for (p = s->pool_first_large; p; p = t) {
864         t = p->next;
865         g_free(p);
866     }
867     s->pool_first_large = NULL;
868     s->pool_cur = s->pool_end = NULL;
869     s->pool_current = NULL;
870 }
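/*
 * Editor's note on the pool contract: tcg_malloc() is a bump-pointer
 * allocator with no per-allocation free. tcg_pool_reset() releases the
 * oversized chunks and rewinds the normal ones for reuse, so everything
 * returned by tcg_malloc_internal() lives only until the next reset.
 */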
871 
872 typedef struct TCGHelperInfo {
873     void *func;
874     const char *name;
875     unsigned flags;
876     unsigned sizemask;
877 } TCGHelperInfo;
878 
879 #include "exec/helper-proto.h"
880 
881 static const TCGHelperInfo all_helpers[] = {
882 #include "exec/helper-tcg.h"
883 };
884 static GHashTable *helper_table;
885 
886 static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
887 static void process_op_defs(TCGContext *s);
888 static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
889                                             TCGReg reg, const char *name);
890 
891 void tcg_context_init(TCGContext *s)
892 {
893     int op, total_args, n, i;
894     TCGOpDef *def;
895     TCGArgConstraint *args_ct;
896     int *sorted_args;
897     TCGTemp *ts;
898 
899     memset(s, 0, sizeof(*s));
900     s->nb_globals = 0;
901 
902     /* Count total number of arguments and allocate the corresponding
903        space */
904     total_args = 0;
905     for (op = 0; op < NB_OPS; op++) {
906         def = &tcg_op_defs[op];
907         n = def->nb_iargs + def->nb_oargs;
908         total_args += n;
909     }
910 
911     args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
912     sorted_args = g_malloc(sizeof(int) * total_args);
913 
914     for (op = 0; op < NB_OPS; op++) {
915         def = &tcg_op_defs[op];
916         def->args_ct = args_ct;
917         def->sorted_args = sorted_args;
918         n = def->nb_iargs + def->nb_oargs;
919         sorted_args += n;
920         args_ct += n;
921     }
922 
923     /* Register helpers.  */
924     /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
925     helper_table = g_hash_table_new(NULL, NULL);
926 
927     for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
928         g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
929                             (gpointer)&all_helpers[i]);
930     }
931 
932     tcg_target_init(s);
933     process_op_defs(s);
934 
935     /* Reverse the order of the saved registers, assuming they're all at
936        the start of tcg_target_reg_alloc_order.  */
937     for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
938         int r = tcg_target_reg_alloc_order[n];
939         if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
940             break;
941         }
942     }
943     for (i = 0; i < n; ++i) {
944         indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
945     }
946     for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
947         indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
948     }
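    /*
     * Editor's example with hypothetical registers: if the allocation order
     * is { s0, s1, t0, t1 } and only s0/s1 are call-saved, the loops above
     * produce { s1, s0, t0, t1 }, so indirect base registers are drawn first
     * from the end of the saved-register group, away from the registers
     * ordinary temps grab first.
     */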
949 
950     tcg_ctx = s;
951     /*
952      * In user-mode we simply share the init context among threads, since we
953      * use a single region. See the documentation of tcg_region_init() for the
954      * reasoning behind this.
955      * In softmmu we will have at most max_cpus TCG threads.
956      */
957 #ifdef CONFIG_USER_ONLY
958     tcg_ctxs = &tcg_ctx;
959     n_tcg_ctxs = 1;
960 #else
961     tcg_ctxs = g_new(TCGContext *, max_cpus);
962 #endif
963 
964     tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
965     ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
966     cpu_env = temp_tcgv_ptr(ts);
967 }
968 
969 /*
970  * Allocate TBs right before their corresponding translated code, making
971  * sure that TBs and code are on different cache lines.
972  */
973 TranslationBlock *tcg_tb_alloc(TCGContext *s)
974 {
975     uintptr_t align = qemu_icache_linesize;
976     TranslationBlock *tb;
977     void *next;
978 
979  retry:
980     tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
981     next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);
982 
983     if (unlikely(next > s->code_gen_highwater)) {
984         if (tcg_region_alloc(s)) {
985             return NULL;
986         }
987         goto retry;
988     }
989     atomic_set(&s->code_gen_ptr, next);
990     s->data_gen_ptr = NULL;
991     return tb;
992 }
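/*
 * Editor's sketch of the layout produced above (hypothetical 64-byte
 * icache lines):
 *
 *   code_gen_ptr -> | pad | TranslationBlock | pad | translated code ...
 *                         ^ tb (line-aligned)      ^ next (line-aligned)
 *
 * so the TB descriptor and its code never share a cache line.
 */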
993 
994 void tcg_prologue_init(TCGContext *s)
995 {
996     size_t prologue_size, total_size;
997     void *buf0, *buf1;
998 
999     /* Put the prologue at the beginning of code_gen_buffer.  */
1000     buf0 = s->code_gen_buffer;
1001     total_size = s->code_gen_buffer_size;
1002     s->code_ptr = buf0;
1003     s->code_buf = buf0;
1004     s->data_gen_ptr = NULL;
1005     s->code_gen_prologue = buf0;
1006 
1007     /* Compute a high-water mark, at which we voluntarily flush the buffer
1008        and start over.  The size here is arbitrary, significantly larger
1009        than we expect the code generation for any one opcode to require.  */
1010     s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);
1011 
1012 #ifdef TCG_TARGET_NEED_POOL_LABELS
1013     s->pool_labels = NULL;
1014 #endif
1015 
1016     /* Generate the prologue.  */
1017     tcg_target_qemu_prologue(s);
1018 
1019 #ifdef TCG_TARGET_NEED_POOL_LABELS
1020     /* Allow the prologue to put e.g. guest_base into a pool entry.  */
1021     {
1022         int result = tcg_out_pool_finalize(s);
1023         tcg_debug_assert(result == 0);
1024     }
1025 #endif
1026 
1027     buf1 = s->code_ptr;
1028     flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
1029 
1030     /* Deduct the prologue from the buffer.  */
1031     prologue_size = tcg_current_code_size(s);
1032     s->code_gen_ptr = buf1;
1033     s->code_gen_buffer = buf1;
1034     s->code_buf = buf1;
1035     total_size -= prologue_size;
1036     s->code_gen_buffer_size = total_size;
1037 
1038     tcg_register_jit(s->code_gen_buffer, total_size);
1039 
1040 #ifdef DEBUG_DISAS
1041     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
1042         qemu_log_lock();
1043         qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
1044         if (s->data_gen_ptr) {
1045             size_t code_size = s->data_gen_ptr - buf0;
1046             size_t data_size = prologue_size - code_size;
1047             size_t i;
1048 
1049             log_disas(buf0, code_size);
1050 
1051             for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1052                 if (sizeof(tcg_target_ulong) == 8) {
1053                     qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
1054                              (uintptr_t)s->data_gen_ptr + i,
1055                              *(uint64_t *)(s->data_gen_ptr + i));
1056                 } else {
1057                     qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
1058                              (uintptr_t)s->data_gen_ptr + i,
1059                              *(uint32_t *)(s->data_gen_ptr + i));
1060                 }
1061             }
1062         } else {
1063             log_disas(buf0, prologue_size);
1064         }
1065         qemu_log("\n");
1066         qemu_log_flush();
1067         qemu_log_unlock();
1068     }
1069 #endif
1070 
1071     /* Assert that goto_ptr is implemented completely.  */
1072     if (TCG_TARGET_HAS_goto_ptr) {
1073         tcg_debug_assert(s->code_gen_epilogue != NULL);
1074     }
1075 }
1076 
1077 void tcg_func_start(TCGContext *s)
1078 {
1079     tcg_pool_reset(s);
1080     s->nb_temps = s->nb_globals;
1081 
1082     /* No temps have been previously allocated for size or locality.  */
1083     memset(s->free_temps, 0, sizeof(s->free_temps));
1084 
1085     s->nb_ops = 0;
1086     s->nb_labels = 0;
1087     s->current_frame_offset = s->frame_start;
1088 
1089 #ifdef CONFIG_DEBUG_TCG
1090     s->goto_tb_issue_mask = 0;
1091 #endif
1092 
1093     QTAILQ_INIT(&s->ops);
1094     QTAILQ_INIT(&s->free_ops);
1095     QSIMPLEQ_INIT(&s->labels);
1096 }
1097 
1098 static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
1099 {
1100     int n = s->nb_temps++;
1101     tcg_debug_assert(n < TCG_MAX_TEMPS);
1102     return memset(&s->temps[n], 0, sizeof(TCGTemp));
1103 }
1104 
1105 static inline TCGTemp *tcg_global_alloc(TCGContext *s)
1106 {
1107     TCGTemp *ts;
1108 
1109     tcg_debug_assert(s->nb_globals == s->nb_temps);
1110     s->nb_globals++;
1111     ts = tcg_temp_alloc(s);
1112     ts->temp_global = 1;
1113 
1114     return ts;
1115 }
1116 
1117 static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
1118                                             TCGReg reg, const char *name)
1119 {
1120     TCGTemp *ts;
1121 
1122     if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
1123         tcg_abort();
1124     }
1125 
1126     ts = tcg_global_alloc(s);
1127     ts->base_type = type;
1128     ts->type = type;
1129     ts->fixed_reg = 1;
1130     ts->reg = reg;
1131     ts->name = name;
1132     tcg_regset_set_reg(s->reserved_regs, reg);
1133 
1134     return ts;
1135 }
1136 
1137 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
1138 {
1139     s->frame_start = start;
1140     s->frame_end = start + size;
1141     s->frame_temp
1142         = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
1143 }
1144 
1145 TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
1146                                      intptr_t offset, const char *name)
1147 {
1148     TCGContext *s = tcg_ctx;
1149     TCGTemp *base_ts = tcgv_ptr_temp(base);
1150     TCGTemp *ts = tcg_global_alloc(s);
1151     int indirect_reg = 0, bigendian = 0;
1152 #ifdef HOST_WORDS_BIGENDIAN
1153     bigendian = 1;
1154 #endif
1155 
1156     if (!base_ts->fixed_reg) {
1157         /* We do not support double-indirect registers.  */
1158         tcg_debug_assert(!base_ts->indirect_reg);
1159         base_ts->indirect_base = 1;
1160         s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
1161                             ? 2 : 1);
1162         indirect_reg = 1;
1163     }
1164 
1165     if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
1166         TCGTemp *ts2 = tcg_global_alloc(s);
1167         char buf[64];
1168 
1169         ts->base_type = TCG_TYPE_I64;
1170         ts->type = TCG_TYPE_I32;
1171         ts->indirect_reg = indirect_reg;
1172         ts->mem_allocated = 1;
1173         ts->mem_base = base_ts;
1174         ts->mem_offset = offset + bigendian * 4;
1175         pstrcpy(buf, sizeof(buf), name);
1176         pstrcat(buf, sizeof(buf), "_0");
1177         ts->name = strdup(buf);
1178 
1179         tcg_debug_assert(ts2 == ts + 1);
1180         ts2->base_type = TCG_TYPE_I64;
1181         ts2->type = TCG_TYPE_I32;
1182         ts2->indirect_reg = indirect_reg;
1183         ts2->mem_allocated = 1;
1184         ts2->mem_base = base_ts;
1185         ts2->mem_offset = offset + (1 - bigendian) * 4;
1186         pstrcpy(buf, sizeof(buf), name);
1187         pstrcat(buf, sizeof(buf), "_1");
1188         ts2->name = strdup(buf);
1189     } else {
1190         ts->base_type = type;
1191         ts->type = type;
1192         ts->indirect_reg = indirect_reg;
1193         ts->mem_allocated = 1;
1194         ts->mem_base = base_ts;
1195         ts->mem_offset = offset;
1196         ts->name = name;
1197     }
1198     return ts;
1199 }
1200 
1201 TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
1202 {
1203     TCGContext *s = tcg_ctx;
1204     TCGTemp *ts;
1205     int idx, k;
1206 
1207     k = type + (temp_local ? TCG_TYPE_COUNT : 0);
1208     idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
1209     if (idx < TCG_MAX_TEMPS) {
1210         /* There is already an available temp with the right type.  */
1211         clear_bit(idx, s->free_temps[k].l);
1212 
1213         ts = &s->temps[idx];
1214         ts->temp_allocated = 1;
1215         tcg_debug_assert(ts->base_type == type);
1216         tcg_debug_assert(ts->temp_local == temp_local);
1217     } else {
1218         ts = tcg_temp_alloc(s);
1219         if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
1220             TCGTemp *ts2 = tcg_temp_alloc(s);
1221 
1222             ts->base_type = type;
1223             ts->type = TCG_TYPE_I32;
1224             ts->temp_allocated = 1;
1225             ts->temp_local = temp_local;
1226 
1227             tcg_debug_assert(ts2 == ts + 1);
1228             ts2->base_type = TCG_TYPE_I64;
1229             ts2->type = TCG_TYPE_I32;
1230             ts2->temp_allocated = 1;
1231             ts2->temp_local = temp_local;
1232         } else {
1233             ts->base_type = type;
1234             ts->type = type;
1235             ts->temp_allocated = 1;
1236             ts->temp_local = temp_local;
1237         }
1238     }
1239 
1240 #if defined(CONFIG_DEBUG_TCG)
1241     s->temps_in_use++;
1242 #endif
1243     return ts;
1244 }
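/*
 * Editor's note: free_temps is indexed by k = base type, plus TCG_TYPE_COUNT
 * when the temp is local, i.e. one free-bitmap per (type, locality) pair.
 * A freed local i64, for instance, can only ever be recycled as a local i64.
 */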
1245 
1246 TCGv_vec tcg_temp_new_vec(TCGType type)
1247 {
1248     TCGTemp *t;
1249 
1250 #ifdef CONFIG_DEBUG_TCG
1251     switch (type) {
1252     case TCG_TYPE_V64:
1253         assert(TCG_TARGET_HAS_v64);
1254         break;
1255     case TCG_TYPE_V128:
1256         assert(TCG_TARGET_HAS_v128);
1257         break;
1258     case TCG_TYPE_V256:
1259         assert(TCG_TARGET_HAS_v256);
1260         break;
1261     default:
1262         g_assert_not_reached();
1263     }
1264 #endif
1265 
1266     t = tcg_temp_new_internal(type, 0);
1267     return temp_tcgv_vec(t);
1268 }
1269 
1270 /* Create a new temp of the same type as an existing temp.  */
1271 TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
1272 {
1273     TCGTemp *t = tcgv_vec_temp(match);
1274 
1275     tcg_debug_assert(t->temp_allocated != 0);
1276 
1277     t = tcg_temp_new_internal(t->base_type, 0);
1278     return temp_tcgv_vec(t);
1279 }
1280 
1281 void tcg_temp_free_internal(TCGTemp *ts)
1282 {
1283     TCGContext *s = tcg_ctx;
1284     int k, idx;
1285 
1286 #if defined(CONFIG_DEBUG_TCG)
1287     s->temps_in_use--;
1288     if (s->temps_in_use < 0) {
1289         fprintf(stderr, "More temporaries freed than allocated!\n");
1290     }
1291 #endif
1292 
1293     tcg_debug_assert(ts->temp_global == 0);
1294     tcg_debug_assert(ts->temp_allocated != 0);
1295     ts->temp_allocated = 0;
1296 
1297     idx = temp_idx(ts);
1298     k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
1299     set_bit(idx, s->free_temps[k].l);
1300 }
1301 
1302 TCGv_i32 tcg_const_i32(int32_t val)
1303 {
1304     TCGv_i32 t0;
1305     t0 = tcg_temp_new_i32();
1306     tcg_gen_movi_i32(t0, val);
1307     return t0;
1308 }
1309 
1310 TCGv_i64 tcg_const_i64(int64_t val)
1311 {
1312     TCGv_i64 t0;
1313     t0 = tcg_temp_new_i64();
1314     tcg_gen_movi_i64(t0, val);
1315     return t0;
1316 }
1317 
1318 TCGv_i32 tcg_const_local_i32(int32_t val)
1319 {
1320     TCGv_i32 t0;
1321     t0 = tcg_temp_local_new_i32();
1322     tcg_gen_movi_i32(t0, val);
1323     return t0;
1324 }
1325 
1326 TCGv_i64 tcg_const_local_i64(int64_t val)
1327 {
1328     TCGv_i64 t0;
1329     t0 = tcg_temp_local_new_i64();
1330     tcg_gen_movi_i64(t0, val);
1331     return t0;
1332 }
1333 
1334 #if defined(CONFIG_DEBUG_TCG)
1335 void tcg_clear_temp_count(void)
1336 {
1337     TCGContext *s = tcg_ctx;
1338     s->temps_in_use = 0;
1339 }
1340 
1341 int tcg_check_temp_count(void)
1342 {
1343     TCGContext *s = tcg_ctx;
1344     if (s->temps_in_use) {
1345         /* Clear the count so that we don't give another
1346          * warning immediately next time around.
1347          */
1348         s->temps_in_use = 0;
1349         return 1;
1350     }
1351     return 0;
1352 }
1353 #endif
1354 
1355 /* Return true if OP may appear in the opcode stream.
1356    Test the runtime variable that controls each opcode.  */
1357 bool tcg_op_supported(TCGOpcode op)
1358 {
1359     const bool have_vec
1360         = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;
1361 
1362     switch (op) {
1363     case INDEX_op_discard:
1364     case INDEX_op_set_label:
1365     case INDEX_op_call:
1366     case INDEX_op_br:
1367     case INDEX_op_mb:
1368     case INDEX_op_insn_start:
1369     case INDEX_op_exit_tb:
1370     case INDEX_op_goto_tb:
1371     case INDEX_op_qemu_ld_i32:
1372     case INDEX_op_qemu_st_i32:
1373     case INDEX_op_qemu_ld_i64:
1374     case INDEX_op_qemu_st_i64:
1375         return true;
1376 
1377     case INDEX_op_goto_ptr:
1378         return TCG_TARGET_HAS_goto_ptr;
1379 
1380     case INDEX_op_mov_i32:
1381     case INDEX_op_movi_i32:
1382     case INDEX_op_setcond_i32:
1383     case INDEX_op_brcond_i32:
1384     case INDEX_op_ld8u_i32:
1385     case INDEX_op_ld8s_i32:
1386     case INDEX_op_ld16u_i32:
1387     case INDEX_op_ld16s_i32:
1388     case INDEX_op_ld_i32:
1389     case INDEX_op_st8_i32:
1390     case INDEX_op_st16_i32:
1391     case INDEX_op_st_i32:
1392     case INDEX_op_add_i32:
1393     case INDEX_op_sub_i32:
1394     case INDEX_op_mul_i32:
1395     case INDEX_op_and_i32:
1396     case INDEX_op_or_i32:
1397     case INDEX_op_xor_i32:
1398     case INDEX_op_shl_i32:
1399     case INDEX_op_shr_i32:
1400     case INDEX_op_sar_i32:
1401         return true;
1402 
1403     case INDEX_op_movcond_i32:
1404         return TCG_TARGET_HAS_movcond_i32;
1405     case INDEX_op_div_i32:
1406     case INDEX_op_divu_i32:
1407         return TCG_TARGET_HAS_div_i32;
1408     case INDEX_op_rem_i32:
1409     case INDEX_op_remu_i32:
1410         return TCG_TARGET_HAS_rem_i32;
1411     case INDEX_op_div2_i32:
1412     case INDEX_op_divu2_i32:
1413         return TCG_TARGET_HAS_div2_i32;
1414     case INDEX_op_rotl_i32:
1415     case INDEX_op_rotr_i32:
1416         return TCG_TARGET_HAS_rot_i32;
1417     case INDEX_op_deposit_i32:
1418         return TCG_TARGET_HAS_deposit_i32;
1419     case INDEX_op_extract_i32:
1420         return TCG_TARGET_HAS_extract_i32;
1421     case INDEX_op_sextract_i32:
1422         return TCG_TARGET_HAS_sextract_i32;
1423     case INDEX_op_extract2_i32:
1424         return TCG_TARGET_HAS_extract2_i32;
1425     case INDEX_op_add2_i32:
1426         return TCG_TARGET_HAS_add2_i32;
1427     case INDEX_op_sub2_i32:
1428         return TCG_TARGET_HAS_sub2_i32;
1429     case INDEX_op_mulu2_i32:
1430         return TCG_TARGET_HAS_mulu2_i32;
1431     case INDEX_op_muls2_i32:
1432         return TCG_TARGET_HAS_muls2_i32;
1433     case INDEX_op_muluh_i32:
1434         return TCG_TARGET_HAS_muluh_i32;
1435     case INDEX_op_mulsh_i32:
1436         return TCG_TARGET_HAS_mulsh_i32;
1437     case INDEX_op_ext8s_i32:
1438         return TCG_TARGET_HAS_ext8s_i32;
1439     case INDEX_op_ext16s_i32:
1440         return TCG_TARGET_HAS_ext16s_i32;
1441     case INDEX_op_ext8u_i32:
1442         return TCG_TARGET_HAS_ext8u_i32;
1443     case INDEX_op_ext16u_i32:
1444         return TCG_TARGET_HAS_ext16u_i32;
1445     case INDEX_op_bswap16_i32:
1446         return TCG_TARGET_HAS_bswap16_i32;
1447     case INDEX_op_bswap32_i32:
1448         return TCG_TARGET_HAS_bswap32_i32;
1449     case INDEX_op_not_i32:
1450         return TCG_TARGET_HAS_not_i32;
1451     case INDEX_op_neg_i32:
1452         return TCG_TARGET_HAS_neg_i32;
1453     case INDEX_op_andc_i32:
1454         return TCG_TARGET_HAS_andc_i32;
1455     case INDEX_op_orc_i32:
1456         return TCG_TARGET_HAS_orc_i32;
1457     case INDEX_op_eqv_i32:
1458         return TCG_TARGET_HAS_eqv_i32;
1459     case INDEX_op_nand_i32:
1460         return TCG_TARGET_HAS_nand_i32;
1461     case INDEX_op_nor_i32:
1462         return TCG_TARGET_HAS_nor_i32;
1463     case INDEX_op_clz_i32:
1464         return TCG_TARGET_HAS_clz_i32;
1465     case INDEX_op_ctz_i32:
1466         return TCG_TARGET_HAS_ctz_i32;
1467     case INDEX_op_ctpop_i32:
1468         return TCG_TARGET_HAS_ctpop_i32;
1469 
1470     case INDEX_op_brcond2_i32:
1471     case INDEX_op_setcond2_i32:
1472         return TCG_TARGET_REG_BITS == 32;
1473 
1474     case INDEX_op_mov_i64:
1475     case INDEX_op_movi_i64:
1476     case INDEX_op_setcond_i64:
1477     case INDEX_op_brcond_i64:
1478     case INDEX_op_ld8u_i64:
1479     case INDEX_op_ld8s_i64:
1480     case INDEX_op_ld16u_i64:
1481     case INDEX_op_ld16s_i64:
1482     case INDEX_op_ld32u_i64:
1483     case INDEX_op_ld32s_i64:
1484     case INDEX_op_ld_i64:
1485     case INDEX_op_st8_i64:
1486     case INDEX_op_st16_i64:
1487     case INDEX_op_st32_i64:
1488     case INDEX_op_st_i64:
1489     case INDEX_op_add_i64:
1490     case INDEX_op_sub_i64:
1491     case INDEX_op_mul_i64:
1492     case INDEX_op_and_i64:
1493     case INDEX_op_or_i64:
1494     case INDEX_op_xor_i64:
1495     case INDEX_op_shl_i64:
1496     case INDEX_op_shr_i64:
1497     case INDEX_op_sar_i64:
1498     case INDEX_op_ext_i32_i64:
1499     case INDEX_op_extu_i32_i64:
1500         return TCG_TARGET_REG_BITS == 64;
1501 
1502     case INDEX_op_movcond_i64:
1503         return TCG_TARGET_HAS_movcond_i64;
1504     case INDEX_op_div_i64:
1505     case INDEX_op_divu_i64:
1506         return TCG_TARGET_HAS_div_i64;
1507     case INDEX_op_rem_i64:
1508     case INDEX_op_remu_i64:
1509         return TCG_TARGET_HAS_rem_i64;
1510     case INDEX_op_div2_i64:
1511     case INDEX_op_divu2_i64:
1512         return TCG_TARGET_HAS_div2_i64;
1513     case INDEX_op_rotl_i64:
1514     case INDEX_op_rotr_i64:
1515         return TCG_TARGET_HAS_rot_i64;
1516     case INDEX_op_deposit_i64:
1517         return TCG_TARGET_HAS_deposit_i64;
1518     case INDEX_op_extract_i64:
1519         return TCG_TARGET_HAS_extract_i64;
1520     case INDEX_op_sextract_i64:
1521         return TCG_TARGET_HAS_sextract_i64;
1522     case INDEX_op_extract2_i64:
1523         return TCG_TARGET_HAS_extract2_i64;
1524     case INDEX_op_extrl_i64_i32:
1525         return TCG_TARGET_HAS_extrl_i64_i32;
1526     case INDEX_op_extrh_i64_i32:
1527         return TCG_TARGET_HAS_extrh_i64_i32;
1528     case INDEX_op_ext8s_i64:
1529         return TCG_TARGET_HAS_ext8s_i64;
1530     case INDEX_op_ext16s_i64:
1531         return TCG_TARGET_HAS_ext16s_i64;
1532     case INDEX_op_ext32s_i64:
1533         return TCG_TARGET_HAS_ext32s_i64;
1534     case INDEX_op_ext8u_i64:
1535         return TCG_TARGET_HAS_ext8u_i64;
1536     case INDEX_op_ext16u_i64:
1537         return TCG_TARGET_HAS_ext16u_i64;
1538     case INDEX_op_ext32u_i64:
1539         return TCG_TARGET_HAS_ext32u_i64;
1540     case INDEX_op_bswap16_i64:
1541         return TCG_TARGET_HAS_bswap16_i64;
1542     case INDEX_op_bswap32_i64:
1543         return TCG_TARGET_HAS_bswap32_i64;
1544     case INDEX_op_bswap64_i64:
1545         return TCG_TARGET_HAS_bswap64_i64;
1546     case INDEX_op_not_i64:
1547         return TCG_TARGET_HAS_not_i64;
1548     case INDEX_op_neg_i64:
1549         return TCG_TARGET_HAS_neg_i64;
1550     case INDEX_op_andc_i64:
1551         return TCG_TARGET_HAS_andc_i64;
1552     case INDEX_op_orc_i64:
1553         return TCG_TARGET_HAS_orc_i64;
1554     case INDEX_op_eqv_i64:
1555         return TCG_TARGET_HAS_eqv_i64;
1556     case INDEX_op_nand_i64:
1557         return TCG_TARGET_HAS_nand_i64;
1558     case INDEX_op_nor_i64:
1559         return TCG_TARGET_HAS_nor_i64;
1560     case INDEX_op_clz_i64:
1561         return TCG_TARGET_HAS_clz_i64;
1562     case INDEX_op_ctz_i64:
1563         return TCG_TARGET_HAS_ctz_i64;
1564     case INDEX_op_ctpop_i64:
1565         return TCG_TARGET_HAS_ctpop_i64;
1566     case INDEX_op_add2_i64:
1567         return TCG_TARGET_HAS_add2_i64;
1568     case INDEX_op_sub2_i64:
1569         return TCG_TARGET_HAS_sub2_i64;
1570     case INDEX_op_mulu2_i64:
1571         return TCG_TARGET_HAS_mulu2_i64;
1572     case INDEX_op_muls2_i64:
1573         return TCG_TARGET_HAS_muls2_i64;
1574     case INDEX_op_muluh_i64:
1575         return TCG_TARGET_HAS_muluh_i64;
1576     case INDEX_op_mulsh_i64:
1577         return TCG_TARGET_HAS_mulsh_i64;
1578 
1579     case INDEX_op_mov_vec:
1580     case INDEX_op_dup_vec:
1581     case INDEX_op_dupi_vec:
1582     case INDEX_op_ld_vec:
1583     case INDEX_op_st_vec:
1584     case INDEX_op_add_vec:
1585     case INDEX_op_sub_vec:
1586     case INDEX_op_and_vec:
1587     case INDEX_op_or_vec:
1588     case INDEX_op_xor_vec:
1589     case INDEX_op_cmp_vec:
1590         return have_vec;
1591     case INDEX_op_dup2_vec:
1592         return have_vec && TCG_TARGET_REG_BITS == 32;
1593     case INDEX_op_not_vec:
1594         return have_vec && TCG_TARGET_HAS_not_vec;
1595     case INDEX_op_neg_vec:
1596         return have_vec && TCG_TARGET_HAS_neg_vec;
1597     case INDEX_op_andc_vec:
1598         return have_vec && TCG_TARGET_HAS_andc_vec;
1599     case INDEX_op_orc_vec:
1600         return have_vec && TCG_TARGET_HAS_orc_vec;
1601     case INDEX_op_mul_vec:
1602         return have_vec && TCG_TARGET_HAS_mul_vec;
1603     case INDEX_op_shli_vec:
1604     case INDEX_op_shri_vec:
1605     case INDEX_op_sari_vec:
1606         return have_vec && TCG_TARGET_HAS_shi_vec;
1607     case INDEX_op_shls_vec:
1608     case INDEX_op_shrs_vec:
1609     case INDEX_op_sars_vec:
1610         return have_vec && TCG_TARGET_HAS_shs_vec;
1611     case INDEX_op_shlv_vec:
1612     case INDEX_op_shrv_vec:
1613     case INDEX_op_sarv_vec:
1614         return have_vec && TCG_TARGET_HAS_shv_vec;
1615     case INDEX_op_ssadd_vec:
1616     case INDEX_op_usadd_vec:
1617     case INDEX_op_sssub_vec:
1618     case INDEX_op_ussub_vec:
1619         return have_vec && TCG_TARGET_HAS_sat_vec;
1620     case INDEX_op_smin_vec:
1621     case INDEX_op_umin_vec:
1622     case INDEX_op_smax_vec:
1623     case INDEX_op_umax_vec:
1624         return have_vec && TCG_TARGET_HAS_minmax_vec;
1625 
1626     default:
1627         tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
1628         return true;
1629     }
1630 }
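/*
 * Editor's usage sketch (hypothetical caller, not from the original source):
 * expanders typically probe this before emitting an opcode, e.g.
 *
 *     if (tcg_op_supported(INDEX_op_ctpop_i32)) {
 *         // emit the opcode directly
 *     } else {
 *         // fall back to an expansion built from simpler ops
 *     }
 */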
1631 
1632 /* Note: we convert the 64-bit args to 32-bit and do some alignment
1633    and endian swap. Maybe it would be better to do the alignment
1634    and endian swap in tcg_reg_alloc_call(). */
1635 void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
1636 {
1637     int i, real_args, nb_rets, pi;
1638     unsigned sizemask, flags;
1639     TCGHelperInfo *info;
1640     TCGOp *op;
1641 
1642     info = g_hash_table_lookup(helper_table, (gpointer)func);
1643     flags = info->flags;
1644     sizemask = info->sizemask;
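    /*
     * Editor's note on the sizemask encoding, as decoded below: bit 0 marks
     * a 64-bit return value; for argument i, bit 2*(i+1) marks a 64-bit
     * argument and bit 2*(i+1)+1 marks a signed one.
     */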
1645 
1646 #if defined(__sparc__) && !defined(__arch64__) \
1647     && !defined(CONFIG_TCG_INTERPRETER)
1648     /* We have 64-bit values in one register, but need to pass as two
1649        separate parameters.  Split them.  */
1650     int orig_sizemask = sizemask;
1651     int orig_nargs = nargs;
1652     TCGv_i64 retl, reth;
1653     TCGTemp *split_args[MAX_OPC_PARAM];
1654 
1655     retl = NULL;
1656     reth = NULL;
1657     if (sizemask != 0) {
1658         for (i = real_args = 0; i < nargs; ++i) {
1659             int is_64bit = sizemask & (1 << (i+1)*2);
1660             if (is_64bit) {
1661                 TCGv_i64 orig = temp_tcgv_i64(args[i]);
1662                 TCGv_i32 h = tcg_temp_new_i32();
1663                 TCGv_i32 l = tcg_temp_new_i32();
1664                 tcg_gen_extr_i64_i32(l, h, orig);
1665                 split_args[real_args++] = tcgv_i32_temp(h);
1666                 split_args[real_args++] = tcgv_i32_temp(l);
1667             } else {
1668                 split_args[real_args++] = args[i];
1669             }
1670         }
1671         nargs = real_args;
1672         args = split_args;
1673         sizemask = 0;
1674     }
1675 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
1676     for (i = 0; i < nargs; ++i) {
1677         int is_64bit = sizemask & (1 << (i+1)*2);
1678         int is_signed = sizemask & (2 << (i+1)*2);
1679         if (!is_64bit) {
1680             TCGv_i64 temp = tcg_temp_new_i64();
1681             TCGv_i64 orig = temp_tcgv_i64(args[i]);
1682             if (is_signed) {
1683                 tcg_gen_ext32s_i64(temp, orig);
1684             } else {
1685                 tcg_gen_ext32u_i64(temp, orig);
1686             }
1687             args[i] = tcgv_i64_temp(temp);
1688         }
1689     }
1690 #endif /* TCG_TARGET_EXTEND_ARGS */
1691 
1692     op = tcg_emit_op(INDEX_op_call);
1693 
1694     pi = 0;
1695     if (ret != NULL) {
1696 #if defined(__sparc__) && !defined(__arch64__) \
1697     && !defined(CONFIG_TCG_INTERPRETER)
1698         if (orig_sizemask & 1) {
1699             /* The 32-bit ABI is going to return the 64-bit value in
1700                the %o0/%o1 register pair.  Prepare for this by using
1701                two return temporaries, and reassemble below.  */
1702             retl = tcg_temp_new_i64();
1703             reth = tcg_temp_new_i64();
1704             op->args[pi++] = tcgv_i64_arg(reth);
1705             op->args[pi++] = tcgv_i64_arg(retl);
1706             nb_rets = 2;
1707         } else {
1708             op->args[pi++] = temp_arg(ret);
1709             nb_rets = 1;
1710         }
1711 #else
1712         if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
1713 #ifdef HOST_WORDS_BIGENDIAN
1714             op->args[pi++] = temp_arg(ret + 1);
1715             op->args[pi++] = temp_arg(ret);
1716 #else
1717             op->args[pi++] = temp_arg(ret);
1718             op->args[pi++] = temp_arg(ret + 1);
1719 #endif
1720             nb_rets = 2;
1721         } else {
1722             op->args[pi++] = temp_arg(ret);
1723             nb_rets = 1;
1724         }
1725 #endif
1726     } else {
1727         nb_rets = 0;
1728     }
1729     TCGOP_CALLO(op) = nb_rets;
1730 
1731     real_args = 0;
1732     for (i = 0; i < nargs; i++) {
1733         int is_64bit = sizemask & (1 << (i+1)*2);
1734         if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
1735 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
1736             /* Some targets want aligned 64-bit args.  */
1737             if (real_args & 1) {
1738                 op->args[pi++] = TCG_CALL_DUMMY_ARG;
1739                 real_args++;
1740             }
1741 #endif
1742            /* If the stack grows up, then we will be placing successive
1743               arguments at lower addresses, which means we need to
1744               reverse the order compared to how we would normally
1745               treat either big or little-endian.  For those arguments
1746               that will wind up in registers, this still works for
1747               HPPA (the only current STACK_GROWSUP target) since the
1748               argument registers are *also* allocated in decreasing
1749               order.  If another such target is added, this logic may
1750               have to get more complicated to differentiate between
1751               stack arguments and register arguments.  */
1752 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
1753             op->args[pi++] = temp_arg(args[i] + 1);
1754             op->args[pi++] = temp_arg(args[i]);
1755 #else
1756             op->args[pi++] = temp_arg(args[i]);
1757             op->args[pi++] = temp_arg(args[i] + 1);
1758 #endif
1759             real_args += 2;
1760             continue;
1761         }
1762 
1763         op->args[pi++] = temp_arg(args[i]);
1764         real_args++;
1765     }
1766     op->args[pi++] = (uintptr_t)func;
1767     op->args[pi++] = flags;
1768     TCGOP_CALLI(op) = real_args;
1769 
1770     /* Make sure the fields didn't overflow.  */
1771     tcg_debug_assert(TCGOP_CALLI(op) == real_args);
1772     tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
1773 
1774 #if defined(__sparc__) && !defined(__arch64__) \
1775     && !defined(CONFIG_TCG_INTERPRETER)
1776     /* Free all of the parts we allocated above.  */
1777     for (i = real_args = 0; i < orig_nargs; ++i) {
1778         int is_64bit = orig_sizemask & (1 << (i+1)*2);
1779         if (is_64bit) {
1780             tcg_temp_free_internal(args[real_args++]);
1781             tcg_temp_free_internal(args[real_args++]);
1782         } else {
1783             real_args++;
1784         }
1785     }
1786     if (orig_sizemask & 1) {
1787         /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
1788            Note that describing these as TCGv_i64 eliminates an unnecessary
1789            zero-extension that tcg_gen_concat_i32_i64 would create.  */
1790         tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
1791         tcg_temp_free_i64(retl);
1792         tcg_temp_free_i64(reth);
1793     }
1794 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
1795     for (i = 0; i < nargs; ++i) {
1796         int is_64bit = sizemask & (1 << (i+1)*2);
1797         if (!is_64bit) {
1798             tcg_temp_free_internal(args[i]);
1799         }
1800     }
1801 #endif /* TCG_TARGET_EXTEND_ARGS */
1802 }
1803 
1804 static void tcg_reg_alloc_start(TCGContext *s)
1805 {
1806     int i, n;
1807     TCGTemp *ts;
1808 
1809     for (i = 0, n = s->nb_globals; i < n; i++) {
1810         ts = &s->temps[i];
1811         ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
1812     }
1813     for (n = s->nb_temps; i < n; i++) {
1814         ts = &s->temps[i];
1815         ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
1816         ts->mem_allocated = 0;
1817         ts->fixed_reg = 0;
1818     }
1819 
1820     memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
1821 }
1822 
1823 static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
1824                                  TCGTemp *ts)
1825 {
1826     int idx = temp_idx(ts);
1827 
1828     if (ts->temp_global) {
1829         pstrcpy(buf, buf_size, ts->name);
1830     } else if (ts->temp_local) {
1831         snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
1832     } else {
1833         snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
1834     }
1835     return buf;
1836 }
1837 
1838 static char *tcg_get_arg_str(TCGContext *s, char *buf,
1839                              int buf_size, TCGArg arg)
1840 {
1841     return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
1842 }
1843 
1844 /* Find helper name.  */
1845 static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
1846 {
1847     const char *ret = NULL;
1848     if (helper_table) {
1849         TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
1850         if (info) {
1851             ret = info->name;
1852         }
1853     }
1854     return ret;
1855 }
1856 
1857 static const char * const cond_name[] =
1858 {
1859     [TCG_COND_NEVER] = "never",
1860     [TCG_COND_ALWAYS] = "always",
1861     [TCG_COND_EQ] = "eq",
1862     [TCG_COND_NE] = "ne",
1863     [TCG_COND_LT] = "lt",
1864     [TCG_COND_GE] = "ge",
1865     [TCG_COND_LE] = "le",
1866     [TCG_COND_GT] = "gt",
1867     [TCG_COND_LTU] = "ltu",
1868     [TCG_COND_GEU] = "geu",
1869     [TCG_COND_LEU] = "leu",
1870     [TCG_COND_GTU] = "gtu"
1871 };
1872 
1873 static const char * const ldst_name[] =
1874 {
1875     [MO_UB]   = "ub",
1876     [MO_SB]   = "sb",
1877     [MO_LEUW] = "leuw",
1878     [MO_LESW] = "lesw",
1879     [MO_LEUL] = "leul",
1880     [MO_LESL] = "lesl",
1881     [MO_LEQ]  = "leq",
1882     [MO_BEUW] = "beuw",
1883     [MO_BESW] = "besw",
1884     [MO_BEUL] = "beul",
1885     [MO_BESL] = "besl",
1886     [MO_BEQ]  = "beq",
1887 };
1888 
1889 static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
1890 #ifdef ALIGNED_ONLY
1891     [MO_UNALN >> MO_ASHIFT]    = "un+",
1892     [MO_ALIGN >> MO_ASHIFT]    = "",
1893 #else
1894     [MO_UNALN >> MO_ASHIFT]    = "",
1895     [MO_ALIGN >> MO_ASHIFT]    = "al+",
1896 #endif
1897     [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
1898     [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
1899     [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
1900     [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
1901     [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
1902     [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
1903 };
1904 
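     /* Test whether the register set contains at most one register:
        clearing the lowest set bit must leave nothing.  (Note this is
        also true for an empty set.)  */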
1905 static inline bool tcg_regset_single(TCGRegSet d)
1906 {
1907     return (d & (d - 1)) == 0;
1908 }
1909 
1910 static inline TCGReg tcg_regset_first(TCGRegSet d)
1911 {
1912     if (TCG_TARGET_NB_REGS <= 32) {
1913         return ctz32(d);
1914     } else {
1915         return ctz64(d);
1916     }
1917 }
1918 
1919 static void tcg_dump_ops(TCGContext *s, bool have_prefs)
1920 {
1921     char buf[128];
1922     TCGOp *op;
1923 
1924     QTAILQ_FOREACH(op, &s->ops, link) {
1925         int i, k, nb_oargs, nb_iargs, nb_cargs;
1926         const TCGOpDef *def;
1927         TCGOpcode c;
1928         int col = 0;
1929 
1930         c = op->opc;
1931         def = &tcg_op_defs[c];
1932 
1933         if (c == INDEX_op_insn_start) {
1934             nb_oargs = 0;
1935             col += qemu_log("\n ----");
1936 
1937             for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1938                 target_ulong a;
1939 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
1940                 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
1941 #else
1942                 a = op->args[i];
1943 #endif
1944                 col += qemu_log(" " TARGET_FMT_lx, a);
1945             }
1946         } else if (c == INDEX_op_call) {
1947             /* variable number of arguments */
1948             nb_oargs = TCGOP_CALLO(op);
1949             nb_iargs = TCGOP_CALLI(op);
1950             nb_cargs = def->nb_cargs;
1951 
1952             /* function name, flags, out args */
1953             col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
1954                             tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
1955                             op->args[nb_oargs + nb_iargs + 1], nb_oargs);
1956             for (i = 0; i < nb_oargs; i++) {
1957                 col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
1958                                                        op->args[i]));
1959             }
1960             for (i = 0; i < nb_iargs; i++) {
1961                 TCGArg arg = op->args[nb_oargs + i];
1962                 const char *t = "<dummy>";
1963                 if (arg != TCG_CALL_DUMMY_ARG) {
1964                     t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
1965                 }
1966                 col += qemu_log(",%s", t);
1967             }
1968         } else {
1969             col += qemu_log(" %s ", def->name);
1970 
1971             nb_oargs = def->nb_oargs;
1972             nb_iargs = def->nb_iargs;
1973             nb_cargs = def->nb_cargs;
1974 
1975             if (def->flags & TCG_OPF_VECTOR) {
1976                 col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
1977                                 8 << TCGOP_VECE(op));
1978             }
1979 
1980             k = 0;
1981             for (i = 0; i < nb_oargs; i++) {
1982                 if (k != 0) {
1983                     col += qemu_log(",");
1984                 }
1985                 col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
1986                                                       op->args[k++]));
1987             }
1988             for (i = 0; i < nb_iargs; i++) {
1989                 if (k != 0) {
1990                     col += qemu_log(",");
1991                 }
1992                 col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
1993                                                       op->args[k++]));
1994             }
1995             switch (c) {
1996             case INDEX_op_brcond_i32:
1997             case INDEX_op_setcond_i32:
1998             case INDEX_op_movcond_i32:
1999             case INDEX_op_brcond2_i32:
2000             case INDEX_op_setcond2_i32:
2001             case INDEX_op_brcond_i64:
2002             case INDEX_op_setcond_i64:
2003             case INDEX_op_movcond_i64:
2004             case INDEX_op_cmp_vec:
2005                 if (op->args[k] < ARRAY_SIZE(cond_name)
2006                     && cond_name[op->args[k]]) {
2007                     col += qemu_log(",%s", cond_name[op->args[k++]]);
2008                 } else {
2009                     col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
2010                 }
2011                 i = 1;
2012                 break;
2013             case INDEX_op_qemu_ld_i32:
2014             case INDEX_op_qemu_st_i32:
2015             case INDEX_op_qemu_ld_i64:
2016             case INDEX_op_qemu_st_i64:
2017                 {
2018                     TCGMemOpIdx oi = op->args[k++];
2019                     TCGMemOp mop = get_memop(oi);
2020                     unsigned ix = get_mmuidx(oi);
2021 
2022                     if (mop & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
2023                         col += qemu_log(",$0x%x,%u", mop, ix);
2024                     } else {
2025                         const char *s_al, *s_op;
2026                         s_al = alignment_name[(mop & MO_AMASK) >> MO_ASHIFT];
2027                         s_op = ldst_name[mop & (MO_BSWAP | MO_SSIZE)];
2028                         col += qemu_log(",%s%s,%u", s_al, s_op, ix);
2029                     }
2030                     i = 1;
2031                 }
2032                 break;
2033             default:
2034                 i = 0;
2035                 break;
2036             }
2037             switch (c) {
2038             case INDEX_op_set_label:
2039             case INDEX_op_br:
2040             case INDEX_op_brcond_i32:
2041             case INDEX_op_brcond_i64:
2042             case INDEX_op_brcond2_i32:
2043                 col += qemu_log("%s$L%d", k ? "," : "",
2044                                 arg_label(op->args[k])->id);
2045                 i++, k++;
2046                 break;
2047             default:
2048                 break;
2049             }
2050             for (; i < nb_cargs; i++, k++) {
2051                 col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
2052             }
2053         }
2054 
2055         if (have_prefs || op->life) {
2056             for (; col < 40; ++col) {
2057                 putc(' ', qemu_logfile);
2058             }
2059         }
2060 
2061         if (op->life) {
2062             unsigned life = op->life;
2063 
2064             if (life & (SYNC_ARG * 3)) {
2065                 qemu_log("  sync:");
2066                 for (i = 0; i < 2; ++i) {
2067                     if (life & (SYNC_ARG << i)) {
2068                         qemu_log(" %d", i);
2069                     }
2070                 }
2071             }
2072             life /= DEAD_ARG;
2073             if (life) {
2074                 qemu_log("  dead:");
2075                 for (i = 0; life; ++i, life >>= 1) {
2076                     if (life & 1) {
2077                         qemu_log(" %d", i);
2078                     }
2079                 }
2080             }
2081         }
2082 
2083         if (have_prefs) {
2084             for (i = 0; i < nb_oargs; ++i) {
2085                 TCGRegSet set = op->output_pref[i];
2086 
2087                 if (i == 0) {
2088                     qemu_log("  pref=");
2089                 } else {
2090                     qemu_log(",");
2091                 }
2092                 if (set == 0) {
2093                     qemu_log("none");
2094                 } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
2095                     qemu_log("all");
2096 #ifdef CONFIG_DEBUG_TCG
2097                 } else if (tcg_regset_single(set)) {
2098                     TCGReg reg = tcg_regset_first(set);
2099                     qemu_log("%s", tcg_target_reg_names[reg]);
2100 #endif
2101                 } else if (TCG_TARGET_NB_REGS <= 32) {
2102                     qemu_log("%#x", (uint32_t)set);
2103                 } else {
2104                     qemu_log("%#" PRIx64, (uint64_t)set);
2105                 }
2106             }
2107         }
2108 
2109         qemu_log("\n");
2110     }
2111 }
2112 
2113 /* We give more priority to constraints with fewer registers.  */
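     /* Worked example (illustrative): with TCG_TARGET_NB_REGS == 16, a
        constraint accepting a single register yields priority 16, one
        accepting all 16 registers yields 1, and a constant-only
        constraint yields 0, so tighter constraints sort first.  */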
2114 static int get_constraint_priority(const TCGOpDef *def, int k)
2115 {
2116     const TCGArgConstraint *arg_ct;
2117 
2118     int i, n;
2119     arg_ct = &def->args_ct[k];
2120     if (arg_ct->ct & TCG_CT_ALIAS) {
2121         /* an alias is equivalent to a single register */
2122         n = 1;
2123     } else {
2124         if (!(arg_ct->ct & TCG_CT_REG))
2125             return 0;
2126         n = 0;
2127         for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2128             if (tcg_regset_test_reg(arg_ct->u.regs, i))
2129                 n++;
2130         }
2131     }
2132     return TCG_TARGET_NB_REGS - n + 1;
2133 }
2134 
2135 /* sort from highest priority to lowest */
2136 static void sort_constraints(TCGOpDef *def, int start, int n)
2137 {
2138     int i, j, p1, p2, tmp;
2139 
2140     for (i = 0; i < n; i++)
2141         def->sorted_args[start + i] = start + i;
2142     if (n <= 1)
2143         return;
2144     for (i = 0; i < n - 1; i++) {
2145         for (j = i + 1; j < n; j++) {
2146             p1 = get_constraint_priority(def, def->sorted_args[start + i]);
2147             p2 = get_constraint_priority(def, def->sorted_args[start + j]);
2148             if (p1 < p2) {
2149                 tmp = def->sorted_args[start + i];
2150                 def->sorted_args[start + i] = def->sorted_args[start + j];
2151                 def->sorted_args[start + j] = tmp;
2152             }
2153         }
2154     }
2155 }
2156 
2157 static void process_op_defs(TCGContext *s)
2158 {
2159     TCGOpcode op;
2160 
2161     for (op = 0; op < NB_OPS; op++) {
2162         TCGOpDef *def = &tcg_op_defs[op];
2163         const TCGTargetOpDef *tdefs;
2164         TCGType type;
2165         int i, nb_args;
2166 
2167         if (def->flags & TCG_OPF_NOT_PRESENT) {
2168             continue;
2169         }
2170 
2171         nb_args = def->nb_iargs + def->nb_oargs;
2172         if (nb_args == 0) {
2173             continue;
2174         }
2175 
2176         tdefs = tcg_target_op_def(op);
2177         /* Missing TCGTargetOpDef entry. */
2178         tcg_debug_assert(tdefs != NULL);
2179 
2180         type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
2181         for (i = 0; i < nb_args; i++) {
2182             const char *ct_str = tdefs->args_ct_str[i];
2183             /* Incomplete TCGTargetOpDef entry. */
2184             tcg_debug_assert(ct_str != NULL);
2185 
2186             def->args_ct[i].u.regs = 0;
2187             def->args_ct[i].ct = 0;
2188             while (*ct_str != '\0') {
2189                 switch (*ct_str) {
2190                 case '0' ... '9':
2191                     {
2192                         int oarg = *ct_str - '0';
2193                         tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
2194                         tcg_debug_assert(oarg < def->nb_oargs);
2195                         tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
2196                         /* TCG_CT_ALIAS is for the output arguments.
2197                            The input is tagged with TCG_CT_IALIAS. */
2198                         def->args_ct[i] = def->args_ct[oarg];
2199                         def->args_ct[oarg].ct |= TCG_CT_ALIAS;
2200                         def->args_ct[oarg].alias_index = i;
2201                         def->args_ct[i].ct |= TCG_CT_IALIAS;
2202                         def->args_ct[i].alias_index = oarg;
2203                     }
2204                     ct_str++;
2205                     break;
2206                 case '&':
2207                     def->args_ct[i].ct |= TCG_CT_NEWREG;
2208                     ct_str++;
2209                     break;
2210                 case 'i':
2211                     def->args_ct[i].ct |= TCG_CT_CONST;
2212                     ct_str++;
2213                     break;
2214                 default:
2215                     ct_str = target_parse_constraint(&def->args_ct[i],
2216                                                      ct_str, type);
2217                     /* Typo in TCGTargetOpDef constraint. */
2218                     tcg_debug_assert(ct_str != NULL);
2219                 }
2220             }
2221         }
2222 
2223         /* TCGTargetOpDef entry with too much information? */
2224         tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
2225 
2226         /* sort the constraints (XXX: this is just a heuristic) */
2227         sort_constraints(def, 0, def->nb_oargs);
2228         sort_constraints(def, def->nb_oargs, def->nb_iargs);
2229     }
2230 }
2231 
2232 void tcg_op_remove(TCGContext *s, TCGOp *op)
2233 {
2234     TCGLabel *label;
2235 
2236     switch (op->opc) {
2237     case INDEX_op_br:
2238         label = arg_label(op->args[0]);
2239         label->refs--;
2240         break;
2241     case INDEX_op_brcond_i32:
2242     case INDEX_op_brcond_i64:
2243         label = arg_label(op->args[3]);
2244         label->refs--;
2245         break;
2246     case INDEX_op_brcond2_i32:
2247         label = arg_label(op->args[5]);
2248         label->refs--;
2249         break;
2250     default:
2251         break;
2252     }
2253 
2254     QTAILQ_REMOVE(&s->ops, op, link);
2255     QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
2256     s->nb_ops--;
2257 
2258 #ifdef CONFIG_PROFILER
2259     atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
2260 #endif
2261 }
2262 
2263 static TCGOp *tcg_op_alloc(TCGOpcode opc)
2264 {
2265     TCGContext *s = tcg_ctx;
2266     TCGOp *op;
2267 
2268     if (likely(QTAILQ_EMPTY(&s->free_ops))) {
2269         op = tcg_malloc(sizeof(TCGOp));
2270     } else {
2271         op = QTAILQ_FIRST(&s->free_ops);
2272         QTAILQ_REMOVE(&s->free_ops, op, link);
2273     }
2274     memset(op, 0, offsetof(TCGOp, link));
2275     op->opc = opc;
2276     s->nb_ops++;
2277 
2278     return op;
2279 }
2280 
2281 TCGOp *tcg_emit_op(TCGOpcode opc)
2282 {
2283     TCGOp *op = tcg_op_alloc(opc);
2284     QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
2285     return op;
2286 }
2287 
2288 TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
2289 {
2290     TCGOp *new_op = tcg_op_alloc(opc);
2291     QTAILQ_INSERT_BEFORE(old_op, new_op, link);
2292     return new_op;
2293 }
2294 
2295 TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
2296 {
2297     TCGOp *new_op = tcg_op_alloc(opc);
2298     QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
2299     return new_op;
2300 }
2301 
2302 /* Reachability analysis: remove unreachable code.  */
2303 static void reachable_code_pass(TCGContext *s)
2304 {
2305     TCGOp *op, *op_next;
2306     bool dead = false;
2307 
2308     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2309         bool remove = dead;
2310         TCGLabel *label;
2311         int call_flags;
2312 
2313         switch (op->opc) {
2314         case INDEX_op_set_label:
2315             label = arg_label(op->args[0]);
2316             if (label->refs == 0) {
2317                 /*
2318                  * While there is an occasional backward branch, virtually
2319                  * all branches generated by the translators are forward.
2320                  * This means that, by the time we reach a label, we will
2321                  * generally have removed all references to it, and there is
2322                  * little to be gained by iterating.
2323                  */
2324                 remove = true;
2325             } else {
2326                 /* Once we see a label, insns become live again.  */
2327                 dead = false;
2328                 remove = false;
2329 
2330                 /*
2331                  * Optimization can fold conditional branches to unconditional.
2332                  * If we find a label with one reference which is preceded by
2333                  * an unconditional branch to it, remove both.  This needed to
2334                  * wait until the dead code in between them was removed.
2335                  */
2336                 if (label->refs == 1) {
2337                     TCGOp *op_prev = QTAILQ_PREV(op, link);
2338                     if (op_prev->opc == INDEX_op_br &&
2339                         label == arg_label(op_prev->args[0])) {
2340                         tcg_op_remove(s, op_prev);
2341                         remove = true;
2342                     }
2343                 }
2344             }
2345             break;
2346 
2347         case INDEX_op_br:
2348         case INDEX_op_exit_tb:
2349         case INDEX_op_goto_ptr:
2350             /* Unconditional branches; everything following is dead.  */
2351             dead = true;
2352             break;
2353 
2354         case INDEX_op_call:
2355             /* Notice noreturn helper calls, raising exceptions.  */
2356             call_flags = op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op) + 1];
2357             if (call_flags & TCG_CALL_NO_RETURN) {
2358                 dead = true;
2359             }
2360             break;
2361 
2362         case INDEX_op_insn_start:
2363             /* Never remove -- we need to keep these for unwind.  */
2364             remove = false;
2365             break;
2366 
2367         default:
2368             break;
2369         }
2370 
2371         if (remove) {
2372             tcg_op_remove(s, op);
2373         }
2374     }
2375 }
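     /* For illustration, given a (hypothetical) op stream where the
      * optimizer has folded a conditional branch:
      *     br $L1           <- unconditional, everything after is dead
      *     mov_i32 ...      <- removed as unreachable
      *     set_label $L1    <- single remaining ref, preceded by the br:
      *                         both the br and the set_label are removed.
      */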
2376 
2377 #define TS_DEAD  1
2378 #define TS_MEM   2
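     /* A temp's liveness state is a combination of the two bits above:
        0 means live; TS_DEAD means the value has no further use; TS_MEM
        means its canonical memory slot must hold the value.  Thus
        TS_DEAD|TS_MEM is a dead temp whose memory copy is (or must be
        made) valid, e.g. a global at the end of a basic block.  */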
2379 
2380 #define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
2381 #define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
2382 
2383 /* For liveness_pass_1, the register preferences for a given temp.  */
2384 static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
2385 {
2386     return ts->state_ptr;
2387 }
2388 
2389 /* For liveness_pass_1, reset the preferences for a given temp to the
2390  * maximal regset for its type.
2391  */
2392 static inline void la_reset_pref(TCGTemp *ts)
2393 {
2394     *la_temp_pref(ts)
2395         = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
2396 }
2397 
2398 /* liveness analysis: end of function: all temps are dead, and globals
2399    should be in memory. */
2400 static void la_func_end(TCGContext *s, int ng, int nt)
2401 {
2402     int i;
2403 
2404     for (i = 0; i < ng; ++i) {
2405         s->temps[i].state = TS_DEAD | TS_MEM;
2406         la_reset_pref(&s->temps[i]);
2407     }
2408     for (i = ng; i < nt; ++i) {
2409         s->temps[i].state = TS_DEAD;
2410         la_reset_pref(&s->temps[i]);
2411     }
2412 }
2413 
2414 /* liveness analysis: end of basic block: all temps are dead, globals
2415    and local temps should be in memory. */
2416 static void la_bb_end(TCGContext *s, int ng, int nt)
2417 {
2418     int i;
2419 
2420     for (i = 0; i < ng; ++i) {
2421         s->temps[i].state = TS_DEAD | TS_MEM;
2422         la_reset_pref(&s->temps[i]);
2423     }
2424     for (i = ng; i < nt; ++i) {
2425         s->temps[i].state = (s->temps[i].temp_local
2426                              ? TS_DEAD | TS_MEM
2427                              : TS_DEAD);
2428         la_reset_pref(&s->temps[i]);
2429     }
2430 }
2431 
2432 /* liveness analysis: sync globals back to memory.  */
2433 static void la_global_sync(TCGContext *s, int ng)
2434 {
2435     int i;
2436 
2437     for (i = 0; i < ng; ++i) {
2438         int state = s->temps[i].state;
2439         s->temps[i].state = state | TS_MEM;
2440         if (state == TS_DEAD) {
2441             /* If the global was previously dead, reset prefs.  */
2442             la_reset_pref(&s->temps[i]);
2443         }
2444     }
2445 }
2446 
2447 /* liveness analysis: sync globals back to memory and kill.  */
2448 static void la_global_kill(TCGContext *s, int ng)
2449 {
2450     int i;
2451 
2452     for (i = 0; i < ng; i++) {
2453         s->temps[i].state = TS_DEAD | TS_MEM;
2454         la_reset_pref(&s->temps[i]);
2455     }
2456 }
2457 
2458 /* liveness analysis: note live temporaries crossing calls.  */
2459 static void la_cross_call(TCGContext *s, int nt)
2460 {
2461     TCGRegSet mask = ~tcg_target_call_clobber_regs;
2462     int i;
2463 
2464     for (i = 0; i < nt; i++) {
2465         TCGTemp *ts = &s->temps[i];
2466         if (!(ts->state & TS_DEAD)) {
2467             TCGRegSet *pset = la_temp_pref(ts);
2468             TCGRegSet set = *pset;
2469 
2470             set &= mask;
2471             /* If the combination is not possible, restart.  */
2472             if (set == 0) {
2473                 set = tcg_target_available_regs[ts->type] & mask;
2474             }
2475             *pset = set;
2476         }
2477     }
2478 }
2479 
2480 /* Liveness analysis: update each op's life mask to tell whether a
2481    given input argument is dead. Instructions updating dead
2482    temporaries are removed. */
2483 static void liveness_pass_1(TCGContext *s)
2484 {
2485     int nb_globals = s->nb_globals;
2486     int nb_temps = s->nb_temps;
2487     TCGOp *op, *op_prev;
2488     TCGRegSet *prefs;
2489     int i;
2490 
2491     prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
2492     for (i = 0; i < nb_temps; ++i) {
2493         s->temps[i].state_ptr = prefs + i;
2494     }
2495 
2496     /* ??? Should be redundant with the exit_tb that ends the TB.  */
2497     la_func_end(s, nb_globals, nb_temps);
2498 
2499     QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
2500         int nb_iargs, nb_oargs;
2501         TCGOpcode opc_new, opc_new2;
2502         bool have_opc_new2;
2503         TCGLifeData arg_life = 0;
2504         TCGTemp *ts;
2505         TCGOpcode opc = op->opc;
2506         const TCGOpDef *def = &tcg_op_defs[opc];
2507 
2508         switch (opc) {
2509         case INDEX_op_call:
2510             {
2511                 int call_flags;
2512                 int nb_call_regs;
2513 
2514                 nb_oargs = TCGOP_CALLO(op);
2515                 nb_iargs = TCGOP_CALLI(op);
2516                 call_flags = op->args[nb_oargs + nb_iargs + 1];
2517 
2518                 /* pure functions can be removed if their result is unused */
2519                 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
2520                     for (i = 0; i < nb_oargs; i++) {
2521                         ts = arg_temp(op->args[i]);
2522                         if (ts->state != TS_DEAD) {
2523                             goto do_not_remove_call;
2524                         }
2525                     }
2526                     goto do_remove;
2527                 }
2528             do_not_remove_call:
2529 
2530                 /* Output args are dead.  */
2531                 for (i = 0; i < nb_oargs; i++) {
2532                     ts = arg_temp(op->args[i]);
2533                     if (ts->state & TS_DEAD) {
2534                         arg_life |= DEAD_ARG << i;
2535                     }
2536                     if (ts->state & TS_MEM) {
2537                         arg_life |= SYNC_ARG << i;
2538                     }
2539                     ts->state = TS_DEAD;
2540                     la_reset_pref(ts);
2541 
2542                     /* Not used -- it will be tcg_target_call_oarg_regs[i].  */
2543                     op->output_pref[i] = 0;
2544                 }
2545 
2546                 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
2547                                     TCG_CALL_NO_READ_GLOBALS))) {
2548                     la_global_kill(s, nb_globals);
2549                 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
2550                     la_global_sync(s, nb_globals);
2551                 }
2552 
2553                 /* Record arguments that die in this helper.  */
2554                 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2555                     ts = arg_temp(op->args[i]);
2556                     if (ts && ts->state & TS_DEAD) {
2557                         arg_life |= DEAD_ARG << i;
2558                     }
2559                 }
2560 
2561                 /* For all live registers, remove call-clobbered prefs.  */
2562                 la_cross_call(s, nb_temps);
2563 
2564                 nb_call_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
2565 
2566                 /* Input arguments are live for preceding opcodes.  */
2567                 for (i = 0; i < nb_iargs; i++) {
2568                     ts = arg_temp(op->args[i + nb_oargs]);
2569                     if (ts && ts->state & TS_DEAD) {
2570                         /* For those arguments that die, and will be allocated
2571                          * in registers, clear the register set for that arg,
2572                          * to be filled in below.  For args that will be on
2573                          * the stack, reset to any available reg.
2574                          */
2575                         *la_temp_pref(ts)
2576                             = (i < nb_call_regs ? 0 :
2577                                tcg_target_available_regs[ts->type]);
2578                         ts->state &= ~TS_DEAD;
2579                     }
2580                 }
2581 
2582                 /* For each input argument, add its input register to prefs.
2583                    If a temp is used once, this produces a single set bit.  */
2584                 for (i = 0; i < MIN(nb_call_regs, nb_iargs); i++) {
2585                     ts = arg_temp(op->args[i + nb_oargs]);
2586                     if (ts) {
2587                         tcg_regset_set_reg(*la_temp_pref(ts),
2588                                            tcg_target_call_iarg_regs[i]);
2589                     }
2590                 }
2591             }
2592             break;
2593         case INDEX_op_insn_start:
2594             break;
2595         case INDEX_op_discard:
2596             /* mark the temporary as dead */
2597             ts = arg_temp(op->args[0]);
2598             ts->state = TS_DEAD;
2599             la_reset_pref(ts);
2600             break;
2601 
2602         case INDEX_op_add2_i32:
2603             opc_new = INDEX_op_add_i32;
2604             goto do_addsub2;
2605         case INDEX_op_sub2_i32:
2606             opc_new = INDEX_op_sub_i32;
2607             goto do_addsub2;
2608         case INDEX_op_add2_i64:
2609             opc_new = INDEX_op_add_i64;
2610             goto do_addsub2;
2611         case INDEX_op_sub2_i64:
2612             opc_new = INDEX_op_sub_i64;
2613         do_addsub2:
2614             nb_iargs = 4;
2615             nb_oargs = 2;
2616             /* Test if the high part of the operation is dead, but not
2617                the low part.  The result can be optimized to a simple
2618                add or sub.  This often happens for an x86_64 guest when
2619                the CPU mode is set to 32-bit.  */
2620             if (arg_temp(op->args[1])->state == TS_DEAD) {
2621                 if (arg_temp(op->args[0])->state == TS_DEAD) {
2622                     goto do_remove;
2623                 }
2624                 /* Replace the opcode and adjust the args in place,
2625                    leaving 3 unused args at the end.  */
2626                 op->opc = opc = opc_new;
2627                 op->args[1] = op->args[2];
2628                 op->args[2] = op->args[4];
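                     /* E.g. "add2_i32 rl,rh,al,ah,bl,bh" with rh dead but
                        rl live becomes "add_i32 rl,al,bl".  */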
2629                 /* Fall through and mark the single-word operation live.  */
2630                 nb_iargs = 2;
2631                 nb_oargs = 1;
2632             }
2633             goto do_not_remove;
2634 
2635         case INDEX_op_mulu2_i32:
2636             opc_new = INDEX_op_mul_i32;
2637             opc_new2 = INDEX_op_muluh_i32;
2638             have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
2639             goto do_mul2;
2640         case INDEX_op_muls2_i32:
2641             opc_new = INDEX_op_mul_i32;
2642             opc_new2 = INDEX_op_mulsh_i32;
2643             have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
2644             goto do_mul2;
2645         case INDEX_op_mulu2_i64:
2646             opc_new = INDEX_op_mul_i64;
2647             opc_new2 = INDEX_op_muluh_i64;
2648             have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
2649             goto do_mul2;
2650         case INDEX_op_muls2_i64:
2651             opc_new = INDEX_op_mul_i64;
2652             opc_new2 = INDEX_op_mulsh_i64;
2653             have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
2654             goto do_mul2;
2655         do_mul2:
2656             nb_iargs = 2;
2657             nb_oargs = 2;
2658             if (arg_temp(op->args[1])->state == TS_DEAD) {
2659                 if (arg_temp(op->args[0])->state == TS_DEAD) {
2660                     /* Both parts of the operation are dead.  */
2661                     goto do_remove;
2662                 }
2663                 /* The high part of the operation is dead; generate the low. */
2664                 op->opc = opc = opc_new;
2665                 op->args[1] = op->args[2];
2666                 op->args[2] = op->args[3];
2667             } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
2668                 /* The low part of the operation is dead; generate the high. */
2669                 op->opc = opc = opc_new2;
2670                 op->args[0] = op->args[1];
2671                 op->args[1] = op->args[2];
2672                 op->args[2] = op->args[3];
2673             } else {
2674                 goto do_not_remove;
2675             }
2676             /* Mark the single-word operation live.  */
2677             nb_oargs = 1;
2678             goto do_not_remove;
2679 
2680         default:
2681             /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
2682             nb_iargs = def->nb_iargs;
2683             nb_oargs = def->nb_oargs;
2684 
2685             /* Test if the operation can be removed because all
2686                its outputs are dead. We assume that nb_oargs == 0
2687                implies side effects */
2688             if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
2689                 for (i = 0; i < nb_oargs; i++) {
2690                     if (arg_temp(op->args[i])->state != TS_DEAD) {
2691                         goto do_not_remove;
2692                     }
2693                 }
2694                 goto do_remove;
2695             }
2696             goto do_not_remove;
2697 
2698         do_remove:
2699             tcg_op_remove(s, op);
2700             break;
2701 
2702         do_not_remove:
2703             for (i = 0; i < nb_oargs; i++) {
2704                 ts = arg_temp(op->args[i]);
2705 
2706                 /* Remember the preference of the uses that followed.  */
2707                 op->output_pref[i] = *la_temp_pref(ts);
2708 
2709                 /* Output args are dead.  */
2710                 if (ts->state & TS_DEAD) {
2711                     arg_life |= DEAD_ARG << i;
2712                 }
2713                 if (ts->state & TS_MEM) {
2714                     arg_life |= SYNC_ARG << i;
2715                 }
2716                 ts->state = TS_DEAD;
2717                 la_reset_pref(ts);
2718             }
2719 
2720             /* If end of basic block, update.  */
2721             if (def->flags & TCG_OPF_BB_EXIT) {
2722                 la_func_end(s, nb_globals, nb_temps);
2723             } else if (def->flags & TCG_OPF_BB_END) {
2724                 la_bb_end(s, nb_globals, nb_temps);
2725             } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2726                 la_global_sync(s, nb_globals);
2727                 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2728                     la_cross_call(s, nb_temps);
2729                 }
2730             }
2731 
2732             /* Record arguments that die in this opcode.  */
2733             for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2734                 ts = arg_temp(op->args[i]);
2735                 if (ts->state & TS_DEAD) {
2736                     arg_life |= DEAD_ARG << i;
2737                 }
2738             }
2739 
2740             /* Input arguments are live for preceding opcodes.  */
2741             for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2742                 ts = arg_temp(op->args[i]);
2743                 if (ts->state & TS_DEAD) {
2744                     /* For operands that were dead, initially allow
2745                        all regs for the type.  */
2746                     *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
2747                     ts->state &= ~TS_DEAD;
2748                 }
2749             }
2750 
2751             /* Incorporate constraints for this operand.  */
2752             switch (opc) {
2753             case INDEX_op_mov_i32:
2754             case INDEX_op_mov_i64:
2755                 /* Note that these are TCG_OPF_NOT_PRESENT and do not
2756                    have proper constraints.  That said, special case
2757                    moves to propagate preferences backward.  */
2758                 if (IS_DEAD_ARG(1)) {
2759                     *la_temp_pref(arg_temp(op->args[0]))
2760                         = *la_temp_pref(arg_temp(op->args[1]));
2761                 }
2762                 break;
2763 
2764             default:
2765                 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2766                     const TCGArgConstraint *ct = &def->args_ct[i];
2767                     TCGRegSet set, *pset;
2768 
2769                     ts = arg_temp(op->args[i]);
2770                     pset = la_temp_pref(ts);
2771                     set = *pset;
2772 
2773                     set &= ct->u.regs;
2774                     if (ct->ct & TCG_CT_IALIAS) {
2775                         set &= op->output_pref[ct->alias_index];
2776                     }
2777                     /* If the combination is not possible, restart.  */
2778                     if (set == 0) {
2779                         set = ct->u.regs;
2780                     }
2781                     *pset = set;
2782                 }
2783                 break;
2784             }
2785             break;
2786         }
2787         op->life = arg_life;
2788     }
2789 }
2790 
2791 /* Liveness analysis: Convert indirect regs to direct temporaries.  */
2792 static bool liveness_pass_2(TCGContext *s)
2793 {
2794     int nb_globals = s->nb_globals;
2795     int nb_temps, i;
2796     bool changes = false;
2797     TCGOp *op, *op_next;
2798 
2799     /* Create a temporary for each indirect global.  */
2800     for (i = 0; i < nb_globals; ++i) {
2801         TCGTemp *its = &s->temps[i];
2802         if (its->indirect_reg) {
2803             TCGTemp *dts = tcg_temp_alloc(s);
2804             dts->type = its->type;
2805             dts->base_type = its->base_type;
2806             its->state_ptr = dts;
2807         } else {
2808             its->state_ptr = NULL;
2809         }
2810         /* All globals begin dead.  */
2811         its->state = TS_DEAD;
2812     }
2813     for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
2814         TCGTemp *its = &s->temps[i];
2815         its->state_ptr = NULL;
2816         its->state = TS_DEAD;
2817     }
2818 
2819     QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
2820         TCGOpcode opc = op->opc;
2821         const TCGOpDef *def = &tcg_op_defs[opc];
2822         TCGLifeData arg_life = op->life;
2823         int nb_iargs, nb_oargs, call_flags;
2824         TCGTemp *arg_ts, *dir_ts;
2825 
2826         if (opc == INDEX_op_call) {
2827             nb_oargs = TCGOP_CALLO(op);
2828             nb_iargs = TCGOP_CALLI(op);
2829             call_flags = op->args[nb_oargs + nb_iargs + 1];
2830         } else {
2831             nb_iargs = def->nb_iargs;
2832             nb_oargs = def->nb_oargs;
2833 
2834             /* Set flags similar to how calls require.  */
2835             if (def->flags & TCG_OPF_BB_END) {
2836                 /* Like writing globals: save_globals */
2837                 call_flags = 0;
2838             } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2839                 /* Like reading globals: sync_globals */
2840                 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
2841             } else {
2842                 /* No effect on globals.  */
2843                 call_flags = (TCG_CALL_NO_READ_GLOBALS |
2844                               TCG_CALL_NO_WRITE_GLOBALS);
2845             }
2846         }
2847 
2848         /* Make sure that input arguments are available.  */
2849         for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2850             arg_ts = arg_temp(op->args[i]);
2851             if (arg_ts) {
2852                 dir_ts = arg_ts->state_ptr;
2853                 if (dir_ts && arg_ts->state == TS_DEAD) {
2854                     TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
2855                                       ? INDEX_op_ld_i32
2856                                       : INDEX_op_ld_i64);
2857                     TCGOp *lop = tcg_op_insert_before(s, op, lopc);
2858 
2859                     lop->args[0] = temp_arg(dir_ts);
2860                     lop->args[1] = temp_arg(arg_ts->mem_base);
2861                     lop->args[2] = arg_ts->mem_offset;
2862 
2863                     /* Loaded, but synced with memory.  */
2864                     arg_ts->state = TS_MEM;
2865                 }
2866             }
2867         }
2868 
2869         /* Perform input replacement, and mark inputs that became dead.
2870            No action is required except keeping temp_state up to date
2871            so that we reload when needed.  */
2872         for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2873             arg_ts = arg_temp(op->args[i]);
2874             if (arg_ts) {
2875                 dir_ts = arg_ts->state_ptr;
2876                 if (dir_ts) {
2877                     op->args[i] = temp_arg(dir_ts);
2878                     changes = true;
2879                     if (IS_DEAD_ARG(i)) {
2880                         arg_ts->state = TS_DEAD;
2881                     }
2882                 }
2883             }
2884         }
2885 
2886         /* Liveness analysis should ensure that the following are
2887            all correct, for call sites and basic block end points.  */
2888         if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
2889             /* Nothing to do */
2890         } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
2891             for (i = 0; i < nb_globals; ++i) {
2892                 /* Liveness should see that globals are synced back,
2893                    that is, either TS_DEAD or TS_MEM.  */
2894                 arg_ts = &s->temps[i];
2895                 tcg_debug_assert(arg_ts->state_ptr == 0
2896                                  || arg_ts->state != 0);
2897             }
2898         } else {
2899             for (i = 0; i < nb_globals; ++i) {
2900                 /* Liveness should see that globals are saved back,
2901                    that is, TS_DEAD, waiting to be reloaded.  */
2902                 arg_ts = &s->temps[i];
2903                 tcg_debug_assert(arg_ts->state_ptr == 0
2904                                  || arg_ts->state == TS_DEAD);
2905             }
2906         }
2907 
2908         /* Outputs become available.  */
2909         for (i = 0; i < nb_oargs; i++) {
2910             arg_ts = arg_temp(op->args[i]);
2911             dir_ts = arg_ts->state_ptr;
2912             if (!dir_ts) {
2913                 continue;
2914             }
2915             op->args[i] = temp_arg(dir_ts);
2916             changes = true;
2917 
2918             /* The output is now live and modified.  */
2919             arg_ts->state = 0;
2920 
2921             /* Sync outputs upon their last write.  */
2922             if (NEED_SYNC_ARG(i)) {
2923                 TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
2924                                   ? INDEX_op_st_i32
2925                                   : INDEX_op_st_i64);
2926                 TCGOp *sop = tcg_op_insert_after(s, op, sopc);
2927 
2928                 sop->args[0] = temp_arg(dir_ts);
2929                 sop->args[1] = temp_arg(arg_ts->mem_base);
2930                 sop->args[2] = arg_ts->mem_offset;
2931 
2932                 arg_ts->state = TS_MEM;
2933             }
2934             /* Drop outputs that are dead.  */
2935             if (IS_DEAD_ARG(i)) {
2936                 arg_ts->state = TS_DEAD;
2937             }
2938         }
2939     }
2940 
2941     return changes;
2942 }
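     /* For illustration: if global 'x' is an indirect_reg global, a use
      * such as "add_i32 x, x, t1" is rewritten above (schematically) into
      *     ld_i32  dx, <mem_base>, <mem_offset>   -- only if x not loaded
      *     add_i32 dx, dx, t1
      *     st_i32  dx, <mem_base>, <mem_offset>   -- only on the last write
      * where 'dx' is the direct temporary allocated for 'x'.  */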
2943 
2944 #ifdef CONFIG_DEBUG_TCG
2945 static void dump_regs(TCGContext *s)
2946 {
2947     TCGTemp *ts;
2948     int i;
2949     char buf[64];
2950 
2951     for (i = 0; i < s->nb_temps; i++) {
2952         ts = &s->temps[i];
2953         printf("  %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
2954         switch (ts->val_type) {
2955         case TEMP_VAL_REG:
2956             printf("%s", tcg_target_reg_names[ts->reg]);
2957             break;
2958         case TEMP_VAL_MEM:
2959             printf("%d(%s)", (int)ts->mem_offset,
2960                    tcg_target_reg_names[ts->mem_base->reg]);
2961             break;
2962         case TEMP_VAL_CONST:
2963             printf("$0x%" TCG_PRIlx, ts->val);
2964             break;
2965         case TEMP_VAL_DEAD:
2966             printf("D");
2967             break;
2968         default:
2969             printf("???");
2970             break;
2971         }
2972         printf("\n");
2973     }
2974 
2975     for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2976         if (s->reg_to_temp[i] != NULL) {
2977             printf("%s: %s\n",
2978                    tcg_target_reg_names[i],
2979                    tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
2980         }
2981     }
2982 }
2983 
2984 static void check_regs(TCGContext *s)
2985 {
2986     int reg;
2987     int k;
2988     TCGTemp *ts;
2989     char buf[64];
2990 
2991     for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2992         ts = s->reg_to_temp[reg];
2993         if (ts != NULL) {
2994             if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
2995                 printf("Inconsistency for register %s:\n",
2996                        tcg_target_reg_names[reg]);
2997                 goto fail;
2998             }
2999         }
3000     }
3001     for (k = 0; k < s->nb_temps; k++) {
3002         ts = &s->temps[k];
3003         if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
3004             && s->reg_to_temp[ts->reg] != ts) {
3005             printf("Inconsistency for temp %s:\n",
3006                    tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
3007         fail:
3008             printf("reg state:\n");
3009             dump_regs(s);
3010             tcg_abort();
3011         }
3012     }
3013 }
3014 #endif
3015 
3016 static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
3017 {
3018 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
3019     /* The Sparc64 stack is accessed with an offset of 2047.  */
3020     s->current_frame_offset = (s->current_frame_offset +
3021                                (tcg_target_long)sizeof(tcg_target_long) - 1) &
3022         ~(sizeof(tcg_target_long) - 1);
3023 #endif
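     /* The rounding above is the usual align-up idiom: e.g. with an
        8-byte tcg_target_long, an offset of 13 becomes
        (13 + 7) & ~7 = 16.  */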
3024     if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
3025         s->frame_end) {
3026         tcg_abort();
3027     }
3028     ts->mem_offset = s->current_frame_offset;
3029     ts->mem_base = s->frame_temp;
3030     ts->mem_allocated = 1;
3031     s->current_frame_offset += sizeof(tcg_target_long);
3032 }
3033 
3034 static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
3035 
3036 /* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
3037    mark it free; otherwise mark it dead.  */
3038 static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
3039 {
3040     if (ts->fixed_reg) {
3041         return;
3042     }
3043     if (ts->val_type == TEMP_VAL_REG) {
3044         s->reg_to_temp[ts->reg] = NULL;
3045     }
3046     ts->val_type = (free_or_dead < 0
3047                     || ts->temp_local
3048                     || ts->temp_global
3049                     ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
3050 }
3051 
3052 /* Mark a temporary as dead.  */
3053 static inline void temp_dead(TCGContext *s, TCGTemp *ts)
3054 {
3055     temp_free_or_dead(s, ts, 1);
3056 }
3057 
3058 /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
3059    register needs to be allocated to store a constant.  If 'free_or_dead'
3060    is non-zero, subsequently release the temporary; if it is positive, the
3061    temp is dead; if it is negative, the temp is free.  */
3062 static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
3063                       TCGRegSet preferred_regs, int free_or_dead)
3064 {
3065     if (ts->fixed_reg) {
3066         return;
3067     }
3068     if (!ts->mem_coherent) {
3069         if (!ts->mem_allocated) {
3070             temp_allocate_frame(s, ts);
3071         }
3072         switch (ts->val_type) {
3073         case TEMP_VAL_CONST:
3074             /* If we're going to free the temp immediately, then we won't
3075                require it later in a register, so attempt to store the
3076                constant to memory directly.  */
3077             if (free_or_dead
3078                 && tcg_out_sti(s, ts->type, ts->val,
3079                                ts->mem_base->reg, ts->mem_offset)) {
3080                 break;
3081             }
3082             temp_load(s, ts, tcg_target_available_regs[ts->type],
3083                       allocated_regs, preferred_regs);
3084             /* fallthrough */
3085 
3086         case TEMP_VAL_REG:
3087             tcg_out_st(s, ts->type, ts->reg,
3088                        ts->mem_base->reg, ts->mem_offset);
3089             break;
3090 
3091         case TEMP_VAL_MEM:
3092             break;
3093 
3094         case TEMP_VAL_DEAD:
3095         default:
3096             tcg_abort();
3097         }
3098         ts->mem_coherent = 1;
3099     }
3100     if (free_or_dead) {
3101         temp_free_or_dead(s, ts, free_or_dead);
3102     }
3103 }
3104 
3105 /* free register 'reg' by spilling the corresponding temporary if necessary */
3106 static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
3107 {
3108     TCGTemp *ts = s->reg_to_temp[reg];
3109     if (ts != NULL) {
3110         temp_sync(s, ts, allocated_regs, 0, -1);
3111     }
3112 }
3113 
3114 /**
3115  * tcg_reg_alloc:
3116  * @required_regs: Set of registers in which we must allocate.
3117  * @allocated_regs: Set of registers which must be avoided.
3118  * @preferred_regs: Set of registers we should prefer.
3119  * @rev: True if we search the registers in "indirect" order.
3120  *
3121  * The allocated register must be in @required_regs & ~@allocated_regs,
3122  * but if we can put it in @preferred_regs we may save a move later.
3123  */
3124 static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
3125                             TCGRegSet allocated_regs,
3126                             TCGRegSet preferred_regs, bool rev)
3127 {
3128     int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
3129     TCGRegSet reg_ct[2];
3130     const int *order;
3131 
3132     reg_ct[1] = required_regs & ~allocated_regs;
3133     tcg_debug_assert(reg_ct[1] != 0);
3134     reg_ct[0] = reg_ct[1] & preferred_regs;
3135 
3136     /* Skip the preferred_regs option if it cannot be satisfied,
3137        or if the preference made no difference.  */
3138     f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
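         /* 'f' selects the first set to try below: 0 tries the preferred
            subset first, 1 goes straight to the full allocatable set.  */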
3139 
3140     order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
3141 
3142     /* Try free registers, preferences first.  */
3143     for (j = f; j < 2; j++) {
3144         TCGRegSet set = reg_ct[j];
3145 
3146         if (tcg_regset_single(set)) {
3147             /* One register in the set.  */
3148             TCGReg reg = tcg_regset_first(set);
3149             if (s->reg_to_temp[reg] == NULL) {
3150                 return reg;
3151             }
3152         } else {
3153             for (i = 0; i < n; i++) {
3154                 TCGReg reg = order[i];
3155                 if (s->reg_to_temp[reg] == NULL &&
3156                     tcg_regset_test_reg(set, reg)) {
3157                     return reg;
3158                 }
3159             }
3160         }
3161     }
3162 
3163     /* We must spill something.  */
3164     for (j = f; j < 2; j++) {
3165         TCGRegSet set = reg_ct[j];
3166 
3167         if (tcg_regset_single(set)) {
3168             /* One register in the set.  */
3169             TCGReg reg = tcg_regset_first(set);
3170             tcg_reg_free(s, reg, allocated_regs);
3171             return reg;
3172         } else {
3173             for (i = 0; i < n; i++) {
3174                 TCGReg reg = order[i];
3175                 if (tcg_regset_test_reg(set, reg)) {
3176                     tcg_reg_free(s, reg, allocated_regs);
3177                     return reg;
3178                 }
3179             }
3180         }
3181     }
3182 
3183     tcg_abort();
3184 }
3185 
3186 /* Make sure the temporary is in a register.  If needed, allocate the register
3187    from DESIRED while avoiding ALLOCATED.  */
3188 static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
3189                       TCGRegSet allocated_regs, TCGRegSet preferred_regs)
3190 {
3191     TCGReg reg;
3192 
3193     switch (ts->val_type) {
3194     case TEMP_VAL_REG:
3195         return;
3196     case TEMP_VAL_CONST:
3197         reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
3198                             preferred_regs, ts->indirect_base);
3199         tcg_out_movi(s, ts->type, reg, ts->val);
3200         ts->mem_coherent = 0;
3201         break;
3202     case TEMP_VAL_MEM:
3203         reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
3204                             preferred_regs, ts->indirect_base);
3205         tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
3206         ts->mem_coherent = 1;
3207         break;
3208     case TEMP_VAL_DEAD:
3209     default:
3210         tcg_abort();
3211     }
3212     ts->reg = reg;
3213     ts->val_type = TEMP_VAL_REG;
3214     s->reg_to_temp[reg] = ts;
3215 }
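
     /* Summary of the value-state transitions performed above:

            before            code emitted           after
            ----------------  ---------------------  --------------------------
            TEMP_VAL_REG      none                   unchanged
            TEMP_VAL_CONST    movi into chosen reg   TEMP_VAL_REG, !mem_coherent
            TEMP_VAL_MEM      load from memory slot  TEMP_VAL_REG, mem_coherent
            TEMP_VAL_DEAD     tcg_abort()            --

        A constant materialized into a register has no up-to-date backing
        store, while a value just loaded from its slot does; hence the
        difference in mem_coherent between the two cases.  */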
3216 
3217 /* Save a temporary to memory. 'allocated_regs' is used in case a
3218    temporary register needs to be allocated to store a constant.  */
3219 static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
3220 {
3221     /* The liveness analysis already ensures that globals are back
3222        in memory. Keep a tcg_debug_assert for safety. */
3223     tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
3224 }
3225 
3226 /* save globals to their canonical location and assume they can be
3227    modified by the following code. 'allocated_regs' is used in case a
3228    temporary register needs to be allocated to store a constant. */
3229 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
3230 {
3231     int i, n;
3232 
3233     for (i = 0, n = s->nb_globals; i < n; i++) {
3234         temp_save(s, &s->temps[i], allocated_regs);
3235     }
3236 }
3237 
3238 /* sync globals to their canonical location and assume they can be
3239    read by the following code. 'allocated_regs' is used in case a
3240    temporary register needs to be allocated to store a constant. */
3241 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
3242 {
3243     int i, n;
3244 
3245     for (i = 0, n = s->nb_globals; i < n; i++) {
3246         TCGTemp *ts = &s->temps[i];
3247         tcg_debug_assert(ts->val_type != TEMP_VAL_REG
3248                          || ts->fixed_reg
3249                          || ts->mem_coherent);
3250     }
3251 }
3252 
3253 /* at the end of a basic block, we assume all temporaries are dead and
3254    all globals are stored at their canonical location. */
3255 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
3256 {
3257     int i;
3258 
3259     for (i = s->nb_globals; i < s->nb_temps; i++) {
3260         TCGTemp *ts = &s->temps[i];
3261         if (ts->temp_local) {
3262             temp_save(s, ts, allocated_regs);
3263         } else {
3264             /* The liveness analysis already ensures that temps are dead.
3265                Keep a tcg_debug_assert for safety. */
3266             tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
3267         }
3268     }
3269 
3270     save_globals(s, allocated_regs);
3271 }
3272 
3273 static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
3274                                   tcg_target_ulong val, TCGLifeData arg_life,
3275                                   TCGRegSet preferred_regs)
3276 {
3277     if (ots->fixed_reg) {
3278         /* For fixed registers, we do not do any constant propagation.  */
3279         tcg_out_movi(s, ots->type, ots->reg, val);
3280         return;
3281     }
3282 
3283     /* The movi is not explicitly generated here.  */
3284     if (ots->val_type == TEMP_VAL_REG) {
3285         s->reg_to_temp[ots->reg] = NULL;
3286     }
3287     ots->val_type = TEMP_VAL_CONST;
3288     ots->val = val;
3289     ots->mem_coherent = 0;
3290     if (NEED_SYNC_ARG(0)) {
3291         temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
3292     } else if (IS_DEAD_ARG(0)) {
3293         temp_dead(s, ots);
3294     }
3295 }
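
     /* Worked example for the non-fixed-reg path above: "movi_i32 t0, 5"
        only records val_type = TEMP_VAL_CONST, val = 5; no host instruction
        is emitted here.  The constant is materialized later, by temp_load()
        if t0 must reach a register or by temp_sync() if it must reach its
        memory slot.  This lazy materialization is the constant propagation
        that the fixed-register case above opts out of.  */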
3296 
3297 static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
3298 {
3299     TCGTemp *ots = arg_temp(op->args[0]);
3300     tcg_target_ulong val = op->args[1];
3301 
3302     tcg_reg_alloc_do_movi(s, ots, val, op->life, op->output_pref[0]);
3303 }
3304 
3305 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
3306 {
3307     const TCGLifeData arg_life = op->life;
3308     TCGRegSet allocated_regs, preferred_regs;
3309     TCGTemp *ts, *ots;
3310     TCGType otype, itype;
3311 
3312     allocated_regs = s->reserved_regs;
3313     preferred_regs = op->output_pref[0];
3314     ots = arg_temp(op->args[0]);
3315     ts = arg_temp(op->args[1]);
3316 
3317     /* Note that otype != itype for no-op truncation.  */
3318     otype = ots->type;
3319     itype = ts->type;
3320 
3321     if (ts->val_type == TEMP_VAL_CONST) {
3322         /* propagate constant or generate sti */
3323         tcg_target_ulong val = ts->val;
3324         if (IS_DEAD_ARG(1)) {
3325             temp_dead(s, ts);
3326         }
3327         tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
3328         return;
3329     }
3330 
3331     /* If the source value is in memory we're going to be forced
3332        to have it in a register in order to perform the copy.  Copy
3333        the SOURCE value into its own register first, so that we
3334        don't have to reload SOURCE the next time it is used. */
3335     if (ts->val_type == TEMP_VAL_MEM) {
3336         temp_load(s, ts, tcg_target_available_regs[itype],
3337                   allocated_regs, preferred_regs);
3338     }
3339 
3340     tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
3341     if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
3342         /* mov to a non-saved dead register makes no sense (even with
3343            liveness analysis disabled). */
3344         tcg_debug_assert(NEED_SYNC_ARG(0));
3345         if (!ots->mem_allocated) {
3346             temp_allocate_frame(s, ots);
3347         }
3348         tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
3349         if (IS_DEAD_ARG(1)) {
3350             temp_dead(s, ts);
3351         }
3352         temp_dead(s, ots);
3353     } else {
3354         if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
3355             /* the mov can be suppressed */
3356             if (ots->val_type == TEMP_VAL_REG) {
3357                 s->reg_to_temp[ots->reg] = NULL;
3358             }
3359             ots->reg = ts->reg;
3360             temp_dead(s, ts);
3361         } else {
3362             if (ots->val_type != TEMP_VAL_REG) {
3363                 /* When allocating a new register, make sure not to spill the
3364                    input one. */
3365                 tcg_regset_set_reg(allocated_regs, ts->reg);
3366                 ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
3367                                          allocated_regs, preferred_regs,
3368                                          ots->indirect_base);
3369             }
3370             tcg_out_mov(s, otype, ots->reg, ts->reg);
3371         }
3372         ots->val_type = TEMP_VAL_REG;
3373         ots->mem_coherent = 0;
3374         s->reg_to_temp[ots->reg] = ots;
3375         if (NEED_SYNC_ARG(0)) {
3376             temp_sync(s, ots, allocated_regs, 0, 0);
3377         }
3378     }
3379 }
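
     /* Illustration of the "mov can be suppressed" path above: for
        "mov_i32 t1, t0" where t0 dies at this op and neither temp is a
        fixed register, no host mov is emitted at all; the host register
        simply changes owner:

            before:  reg_to_temp[R] == t0, t1 anywhere
            after:   reg_to_temp[R] == t1, t0 dead  */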
3380 
3381 static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
3382 {
3383     const TCGLifeData arg_life = op->life;
3384     const TCGOpDef * const def = &tcg_op_defs[op->opc];
3385     TCGRegSet i_allocated_regs;
3386     TCGRegSet o_allocated_regs;
3387     int i, k, nb_iargs, nb_oargs;
3388     TCGReg reg;
3389     TCGArg arg;
3390     const TCGArgConstraint *arg_ct;
3391     TCGTemp *ts;
3392     TCGArg new_args[TCG_MAX_OP_ARGS];
3393     int const_args[TCG_MAX_OP_ARGS];
3394 
3395     nb_oargs = def->nb_oargs;
3396     nb_iargs = def->nb_iargs;
3397 
3398     /* copy constants */
3399     memcpy(new_args + nb_oargs + nb_iargs,
3400            op->args + nb_oargs + nb_iargs,
3401            sizeof(TCGArg) * def->nb_cargs);
3402 
3403     i_allocated_regs = s->reserved_regs;
3404     o_allocated_regs = s->reserved_regs;
3405 
3406     /* satisfy input constraints */
3407     for (k = 0; k < nb_iargs; k++) {
3408         TCGRegSet i_preferred_regs, o_preferred_regs;
3409 
3410         i = def->sorted_args[nb_oargs + k];
3411         arg = op->args[i];
3412         arg_ct = &def->args_ct[i];
3413         ts = arg_temp(arg);
3414 
3415         if (ts->val_type == TEMP_VAL_CONST
3416             && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
3417             /* constant is OK for instruction */
3418             const_args[i] = 1;
3419             new_args[i] = ts->val;
3420             continue;
3421         }
3422 
3423         i_preferred_regs = o_preferred_regs = 0;
3424         if (arg_ct->ct & TCG_CT_IALIAS) {
3425             o_preferred_regs = op->output_pref[arg_ct->alias_index];
3426             if (ts->fixed_reg) {
3427                 /* if fixed register, we must allocate a new register
3428                    if the alias is not the same register */
3429                 if (arg != op->args[arg_ct->alias_index]) {
3430                     goto allocate_in_reg;
3431                 }
3432             } else {
3433                 /* if the input is aliased to an output and if it is
3434                    not dead after the instruction, we must allocate
3435                    a new register and move it */
3436                 if (!IS_DEAD_ARG(i)) {
3437                     goto allocate_in_reg;
3438                 }
3439 
3440                 /* check if the current register has already been allocated
3441                    for another input aliased to an output */
3442                 if (ts->val_type == TEMP_VAL_REG) {
3443                     int k2, i2;
3444                     reg = ts->reg;
3445                     for (k2 = 0; k2 < k; k2++) {
3446                         i2 = def->sorted_args[nb_oargs + k2];
3447                         if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
3448                             reg == new_args[i2]) {
3449                             goto allocate_in_reg;
3450                         }
3451                     }
3452                 }
3453                 i_preferred_regs = o_preferred_regs;
3454             }
3455         }
3456 
3457         temp_load(s, ts, arg_ct->u.regs, i_allocated_regs, i_preferred_regs);
3458         reg = ts->reg;
3459 
3460         if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
3461             /* nothing to do: the constraint is satisfied */
3462         } else {
3463         allocate_in_reg:
3464             /* allocate a new register matching the constraint
3465                and move the temporary register into it */
3466             temp_load(s, ts, tcg_target_available_regs[ts->type],
3467                       i_allocated_regs, 0);
3468             reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
3469                                 o_preferred_regs, ts->indirect_base);
3470             tcg_out_mov(s, ts->type, reg, ts->reg);
3471         }
3472         new_args[i] = reg;
3473         const_args[i] = 0;
3474         tcg_regset_set_reg(i_allocated_regs, reg);
3475     }
3476 
3477     /* mark dead temporaries and free the associated registers */
3478     for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
3479         if (IS_DEAD_ARG(i)) {
3480             temp_dead(s, arg_temp(op->args[i]));
3481         }
3482     }
3483 
3484     if (def->flags & TCG_OPF_BB_END) {
3485         tcg_reg_alloc_bb_end(s, i_allocated_regs);
3486     } else {
3487         if (def->flags & TCG_OPF_CALL_CLOBBER) {
3488             /* XXX: permit generic clobber register list? */
3489             for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
3490                 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
3491                     tcg_reg_free(s, i, i_allocated_regs);
3492                 }
3493             }
3494         }
3495         if (def->flags & TCG_OPF_SIDE_EFFECTS) {
3496             /* sync globals if the op has side effects and might trigger
3497                an exception. */
3498             sync_globals(s, i_allocated_regs);
3499         }
3500 
3501         /* satisfy the output constraints */
3502         for (k = 0; k < nb_oargs; k++) {
3503             i = def->sorted_args[k];
3504             arg = op->args[i];
3505             arg_ct = &def->args_ct[i];
3506             ts = arg_temp(arg);
3507             if ((arg_ct->ct & TCG_CT_ALIAS)
3508                 && !const_args[arg_ct->alias_index]) {
3509                 reg = new_args[arg_ct->alias_index];
3510             } else if (arg_ct->ct & TCG_CT_NEWREG) {
3511                 reg = tcg_reg_alloc(s, arg_ct->u.regs,
3512                                     i_allocated_regs | o_allocated_regs,
3513                                     op->output_pref[k], ts->indirect_base);
3514             } else {
3515                 /* if fixed register, we try to use it */
3516                 reg = ts->reg;
3517                 if (ts->fixed_reg &&
3518                     tcg_regset_test_reg(arg_ct->u.regs, reg)) {
3519                     goto oarg_end;
3520                 }
3521                 reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
3522                                     op->output_pref[k], ts->indirect_base);
3523             }
3524             tcg_regset_set_reg(o_allocated_regs, reg);
3525             /* if a fixed register is used, then a move will be done afterwards */
3526             if (!ts->fixed_reg) {
3527                 if (ts->val_type == TEMP_VAL_REG) {
3528                     s->reg_to_temp[ts->reg] = NULL;
3529                 }
3530                 ts->val_type = TEMP_VAL_REG;
3531                 ts->reg = reg;
3532                 /* temp value is modified, so the value kept in memory is
3533                    potentially not the same */
3534                 ts->mem_coherent = 0;
3535                 s->reg_to_temp[reg] = ts;
3536             }
3537         oarg_end:
3538             new_args[i] = reg;
3539         }
3540     }
3541 
3542     /* emit instruction */
3543     if (def->flags & TCG_OPF_VECTOR) {
3544         tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
3545                        new_args, const_args);
3546     } else {
3547         tcg_out_op(s, op->opc, new_args, const_args);
3548     }
3549 
3550     /* move the outputs in the correct register if needed */
3551     for (i = 0; i < nb_oargs; i++) {
3552         ts = arg_temp(op->args[i]);
3553         reg = new_args[i];
3554         if (ts->fixed_reg && ts->reg != reg) {
3555             tcg_out_mov(s, ts->type, ts->reg, reg);
3556         }
3557         if (NEED_SYNC_ARG(i)) {
3558             temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
3559         } else if (IS_DEAD_ARG(i)) {
3560             temp_dead(s, ts);
3561         }
3562     }
3563 }
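
     /* Worked example of the input aliasing rules above, for a
        two-address host executing "add_i32 t0, t1, t2" where output 0
        is constrained to alias input 1 (TCG_CT_IALIAS):

            - if t1 dies at this op, its register can be handed straight
              to t0 and the add may clobber it in place;
            - if t1 stays live, or its register was already picked for
              another aliased input, allocate_in_reg copies t1 into a
              fresh register first so the instruction does not corrupt
              the surviving value.  */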
3564 
3565 #ifdef TCG_TARGET_STACK_GROWSUP
3566 #define STACK_DIR(x) (-(x))
3567 #else
3568 #define STACK_DIR(x) (x)
3569 #endif
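
     /* Worked example: with 8-byte stack slots, the stack-argument loop in
        tcg_reg_alloc_call() below stores to TCG_TARGET_CALL_STACK_OFFSET
        + 0, + 8, + 16, ... on the usual downward-growing stack, and to
        - 8, - 16, - 24, ... (offset pre-decremented) when
        TCG_TARGET_STACK_GROWSUP is defined.  */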
3570 
3571 static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
3572 {
3573     const int nb_oargs = TCGOP_CALLO(op);
3574     const int nb_iargs = TCGOP_CALLI(op);
3575     const TCGLifeData arg_life = op->life;
3576     int flags, nb_regs, i;
3577     TCGReg reg;
3578     TCGArg arg;
3579     TCGTemp *ts;
3580     intptr_t stack_offset;
3581     size_t call_stack_size;
3582     tcg_insn_unit *func_addr;
3583     int allocate_args;
3584     TCGRegSet allocated_regs;
3585 
3586     func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
3587     flags = op->args[nb_oargs + nb_iargs + 1];
3588 
3589     nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
3590     if (nb_regs > nb_iargs) {
3591         nb_regs = nb_iargs;
3592     }
3593 
3594     /* assign stack slots first */
3595     call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
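         /* Round up to the stack alignment, using the usual power-of-two
            idiom: e.g. three 8-byte slots with TCG_TARGET_STACK_ALIGN == 16
            give (24 + 15) & ~15 == 32.  */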
3596     call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
3597         ~(TCG_TARGET_STACK_ALIGN - 1);
3598     allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
3599     if (allocate_args) {
3600         /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
3601            preallocate call stack */
3602         tcg_abort();
3603     }
3604 
3605     stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
3606     for (i = nb_regs; i < nb_iargs; i++) {
3607         arg = op->args[nb_oargs + i];
3608 #ifdef TCG_TARGET_STACK_GROWSUP
3609         stack_offset -= sizeof(tcg_target_long);
3610 #endif
3611         if (arg != TCG_CALL_DUMMY_ARG) {
3612             ts = arg_temp(arg);
3613             temp_load(s, ts, tcg_target_available_regs[ts->type],
3614                       s->reserved_regs, 0);
3615             tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
3616         }
3617 #ifndef TCG_TARGET_STACK_GROWSUP
3618         stack_offset += sizeof(tcg_target_long);
3619 #endif
3620     }
3621 
3622     /* assign input registers */
3623     allocated_regs = s->reserved_regs;
3624     for (i = 0; i < nb_regs; i++) {
3625         arg = op->args[nb_oargs + i];
3626         if (arg != TCG_CALL_DUMMY_ARG) {
3627             ts = arg_temp(arg);
3628             reg = tcg_target_call_iarg_regs[i];
3629 
3630             if (ts->val_type == TEMP_VAL_REG) {
3631                 if (ts->reg != reg) {
3632                     tcg_reg_free(s, reg, allocated_regs);
3633                     tcg_out_mov(s, ts->type, reg, ts->reg);
3634                 }
3635             } else {
3636                 TCGRegSet arg_set = 0;
3637 
3638                 tcg_reg_free(s, reg, allocated_regs);
3639                 tcg_regset_set_reg(arg_set, reg);
3640                 temp_load(s, ts, arg_set, allocated_regs, 0);
3641             }
3642 
3643             tcg_regset_set_reg(allocated_regs, reg);
3644         }
3645     }
3646 
3647     /* mark dead temporaries and free the associated registers */
3648     for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
3649         if (IS_DEAD_ARG(i)) {
3650             temp_dead(s, arg_temp(op->args[i]));
3651         }
3652     }
3653 
3654     /* clobber call registers */
3655     for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
3656         if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
3657             tcg_reg_free(s, i, allocated_regs);
3658         }
3659     }
3660 
3661     /* Save globals if they might be written by the helper, sync them if
3662        they might be read. */
3663     if (flags & TCG_CALL_NO_READ_GLOBALS) {
3664         /* Nothing to do */
3665     } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
3666         sync_globals(s, allocated_regs);
3667     } else {
3668         save_globals(s, allocated_regs);
3669     }
3670 
3671     tcg_out_call(s, func_addr);
3672 
3673     /* assign output registers and emit moves if needed */
3674     for (i = 0; i < nb_oargs; i++) {
3675         arg = op->args[i];
3676         ts = arg_temp(arg);
3677         reg = tcg_target_call_oarg_regs[i];
3678         tcg_debug_assert(s->reg_to_temp[reg] == NULL);
3679 
3680         if (ts->fixed_reg) {
3681             if (ts->reg != reg) {
3682                 tcg_out_mov(s, ts->type, ts->reg, reg);
3683             }
3684         } else {
3685             if (ts->val_type == TEMP_VAL_REG) {
3686                 s->reg_to_temp[ts->reg] = NULL;
3687             }
3688             ts->val_type = TEMP_VAL_REG;
3689             ts->reg = reg;
3690             ts->mem_coherent = 0;
3691             s->reg_to_temp[reg] = ts;
3692             if (NEED_SYNC_ARG(i)) {
3693                 temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i));
3694             } else if (IS_DEAD_ARG(i)) {
3695                 temp_dead(s, ts);
3696             }
3697         }
3698     }
3699 }
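
     /* Worked example of the register/stack split above: with six integer
        argument registers (ARRAY_SIZE(tcg_target_call_iarg_regs) == 6, as
        on x86-64) and a helper taking eight arguments, nb_regs == 6, so
        arguments 0..5 are assigned to tcg_target_call_iarg_regs[0..5] and
        arguments 6..7 are stored to the stack slots laid out first.  */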
3700 
3701 #ifdef CONFIG_PROFILER
3702 
3703 /* avoid copy/paste errors */
3704 #define PROF_ADD(to, from, field)                       \
3705     do {                                                \
3706         (to)->field += atomic_read(&((from)->field));   \
3707     } while (0)
3708 
3709 #define PROF_MAX(to, from, field)                                       \
3710     do {                                                                \
3711         typeof((from)->field) val__ = atomic_read(&((from)->field));    \
3712         if (val__ > (to)->field) {                                      \
3713             (to)->field = val__;                                        \
3714         }                                                               \
3715     } while (0)
3716 
3717 /* Pass in a zeroed @prof */
3718 static inline
3719 void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
3720 {
3721     unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
3722     unsigned int i;
3723 
3724     for (i = 0; i < n_ctxs; i++) {
3725         TCGContext *s = atomic_read(&tcg_ctxs[i]);
3726         const TCGProfile *orig = &s->prof;
3727 
3728         if (counters) {
3729             PROF_ADD(prof, orig, cpu_exec_time);
3730             PROF_ADD(prof, orig, tb_count1);
3731             PROF_ADD(prof, orig, tb_count);
3732             PROF_ADD(prof, orig, op_count);
3733             PROF_MAX(prof, orig, op_count_max);
3734             PROF_ADD(prof, orig, temp_count);
3735             PROF_MAX(prof, orig, temp_count_max);
3736             PROF_ADD(prof, orig, del_op_count);
3737             PROF_ADD(prof, orig, code_in_len);
3738             PROF_ADD(prof, orig, code_out_len);
3739             PROF_ADD(prof, orig, search_out_len);
3740             PROF_ADD(prof, orig, interm_time);
3741             PROF_ADD(prof, orig, code_time);
3742             PROF_ADD(prof, orig, la_time);
3743             PROF_ADD(prof, orig, opt_time);
3744             PROF_ADD(prof, orig, restore_count);
3745             PROF_ADD(prof, orig, restore_time);
3746         }
3747         if (table) {
3748             int i;
3749 
3750             for (i = 0; i < NB_OPS; i++) {
3751                 PROF_ADD(prof, orig, table_op_count[i]);
3752             }
3753         }
3754     }
3755 }
3756 
3757 #undef PROF_ADD
3758 #undef PROF_MAX
3759 
3760 static void tcg_profile_snapshot_counters(TCGProfile *prof)
3761 {
3762     tcg_profile_snapshot(prof, true, false);
3763 }
3764 
3765 static void tcg_profile_snapshot_table(TCGProfile *prof)
3766 {
3767     tcg_profile_snapshot(prof, false, true);
3768 }
3769 
3770 void tcg_dump_op_count(void)
3771 {
3772     TCGProfile prof = {};
3773     int i;
3774 
3775     tcg_profile_snapshot_table(&prof);
3776     for (i = 0; i < NB_OPS; i++) {
3777         qemu_printf("%s %" PRId64 "\n", tcg_op_defs[i].name,
3778                     prof.table_op_count[i]);
3779     }
3780 }
3781 
3782 int64_t tcg_cpu_exec_time(void)
3783 {
3784     unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
3785     unsigned int i;
3786     int64_t ret = 0;
3787 
3788     for (i = 0; i < n_ctxs; i++) {
3789         const TCGContext *s = atomic_read(&tcg_ctxs[i]);
3790         const TCGProfile *prof = &s->prof;
3791 
3792         ret += atomic_read(&prof->cpu_exec_time);
3793     }
3794     return ret;
3795 }
3796 #else
3797 void tcg_dump_op_count(void)
3798 {
3799     qemu_printf("[TCG profiler not compiled]\n");
3800 }
3801 
3802 int64_t tcg_cpu_exec_time(void)
3803 {
3804     error_report("%s: TCG profiler not compiled", __func__);
3805     exit(EXIT_FAILURE);
3806 }
3807 #endif
3808 
3809 
3810 int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
3811 {
3812 #ifdef CONFIG_PROFILER
3813     TCGProfile *prof = &s->prof;
3814 #endif
3815     int i, num_insns;
3816     TCGOp *op;
3817 
3818 #ifdef CONFIG_PROFILER
3819     {
3820         int n = 0;
3821 
3822         QTAILQ_FOREACH(op, &s->ops, link) {
3823             n++;
3824         }
3825         atomic_set(&prof->op_count, prof->op_count + n);
3826         if (n > prof->op_count_max) {
3827             atomic_set(&prof->op_count_max, n);
3828         }
3829 
3830         n = s->nb_temps;
3831         atomic_set(&prof->temp_count, prof->temp_count + n);
3832         if (n > prof->temp_count_max) {
3833             atomic_set(&prof->temp_count_max, n);
3834         }
3835     }
3836 #endif
3837 
3838 #ifdef DEBUG_DISAS
3839     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
3840                  && qemu_log_in_addr_range(tb->pc))) {
3841         qemu_log_lock();
3842         qemu_log("OP:\n");
3843         tcg_dump_ops(s, false);
3844         qemu_log("\n");
3845         qemu_log_unlock();
3846     }
3847 #endif
3848 
3849 #ifdef CONFIG_DEBUG_TCG
3850     /* Ensure all labels referenced have been emitted.  */
3851     {
3852         TCGLabel *l;
3853         bool error = false;
3854 
3855         QSIMPLEQ_FOREACH(l, &s->labels, next) {
3856             if (unlikely(!l->present) && l->refs) {
3857                 qemu_log_mask(CPU_LOG_TB_OP,
3858                               "$L%d referenced but not present.\n", l->id);
3859                 error = true;
3860             }
3861         }
3862         assert(!error);
3863     }
3864 #endif
3865 
3866 #ifdef CONFIG_PROFILER
3867     atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
3868 #endif
3869 
3870 #ifdef USE_TCG_OPTIMIZATIONS
3871     tcg_optimize(s);
3872 #endif
3873 
3874 #ifdef CONFIG_PROFILER
3875     atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
3876     atomic_set(&prof->la_time, prof->la_time - profile_getclock());
3877 #endif
3878 
3879     reachable_code_pass(s);
3880     liveness_pass_1(s);
3881 
3882     if (s->nb_indirects > 0) {
3883 #ifdef DEBUG_DISAS
3884         if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
3885                      && qemu_log_in_addr_range(tb->pc))) {
3886             qemu_log_lock();
3887             qemu_log("OP before indirect lowering:\n");
3888             tcg_dump_ops(s, false);
3889             qemu_log("\n");
3890             qemu_log_unlock();
3891         }
3892 #endif
3893         /* Replace indirect temps with direct temps.  */
3894         if (liveness_pass_2(s)) {
3895             /* If changes were made, re-run liveness.  */
3896             liveness_pass_1(s);
3897         }
3898     }
3899 
3900 #ifdef CONFIG_PROFILER
3901     atomic_set(&prof->la_time, prof->la_time + profile_getclock());
3902 #endif
3903 
3904 #ifdef DEBUG_DISAS
3905     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
3906                  && qemu_log_in_addr_range(tb->pc))) {
3907         qemu_log_lock();
3908         qemu_log("OP after optimization and liveness analysis:\n");
3909         tcg_dump_ops(s, true);
3910         qemu_log("\n");
3911         qemu_log_unlock();
3912     }
3913 #endif
3914 
3915     tcg_reg_alloc_start(s);
3916 
3917     s->code_buf = tb->tc.ptr;
3918     s->code_ptr = tb->tc.ptr;
3919 
3920 #ifdef TCG_TARGET_NEED_LDST_LABELS
3921     QSIMPLEQ_INIT(&s->ldst_labels);
3922 #endif
3923 #ifdef TCG_TARGET_NEED_POOL_LABELS
3924     s->pool_labels = NULL;
3925 #endif
3926 
3927     num_insns = -1;
3928     QTAILQ_FOREACH(op, &s->ops, link) {
3929         TCGOpcode opc = op->opc;
3930 
3931 #ifdef CONFIG_PROFILER
3932         atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
3933 #endif
3934 
3935         switch (opc) {
3936         case INDEX_op_mov_i32:
3937         case INDEX_op_mov_i64:
3938         case INDEX_op_mov_vec:
3939             tcg_reg_alloc_mov(s, op);
3940             break;
3941         case INDEX_op_movi_i32:
3942         case INDEX_op_movi_i64:
3943         case INDEX_op_dupi_vec:
3944             tcg_reg_alloc_movi(s, op);
3945             break;
3946         case INDEX_op_insn_start:
3947             if (num_insns >= 0) {
3948                 size_t off = tcg_current_code_size(s);
3949                 s->gen_insn_end_off[num_insns] = off;
3950                 /* Assert that we do not overflow our stored offset.  */
3951                 assert(s->gen_insn_end_off[num_insns] == off);
3952             }
3953             num_insns++;
3954             for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
3955                 target_ulong a;
3956 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
3957                 a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
3958 #else
3959                 a = op->args[i];
3960 #endif
3961                 s->gen_insn_data[num_insns][i] = a;
3962             }
3963             break;
3964         case INDEX_op_discard:
3965             temp_dead(s, arg_temp(op->args[0]));
3966             break;
3967         case INDEX_op_set_label:
3968             tcg_reg_alloc_bb_end(s, s->reserved_regs);
3969             tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
3970             break;
3971         case INDEX_op_call:
3972             tcg_reg_alloc_call(s, op);
3973             break;
3974         default:
3975             /* Sanity check that we've not introduced any unhandled opcodes. */
3976             tcg_debug_assert(tcg_op_supported(opc));
3977             /* Note: it would be much faster to have specialized
3978                register allocator functions for some common argument
3979                patterns */
3980             tcg_reg_alloc_op(s, op);
3981             break;
3982         }
3983 #ifdef CONFIG_DEBUG_TCG
3984         check_regs(s);
3985 #endif
3986         /* Test for (pending) buffer overflow.  The assumption is that any
3987            one operation beginning below the high water mark cannot overrun
3988            the buffer completely.  Thus we can test for overflow after
3989            generating code without having to check during generation.  */
3990         if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
3991             return -1;
3992         }
3993         /* Test for TB overflow, as seen by gen_insn_end_off.  */
3994         if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
3995             return -2;
3996         }
3997     }
3998     tcg_debug_assert(num_insns >= 0);
3999     s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
4000 
4001     /* Generate TB finalization at the end of block */
4002 #ifdef TCG_TARGET_NEED_LDST_LABELS
4003     i = tcg_out_ldst_finalize(s);
4004     if (i < 0) {
4005         return i;
4006     }
4007 #endif
4008 #ifdef TCG_TARGET_NEED_POOL_LABELS
4009     i = tcg_out_pool_finalize(s);
4010     if (i < 0) {
4011         return i;
4012     }
4013 #endif
4014     if (!tcg_resolve_relocs(s)) {
4015         return -2;
4016     }
4017 
4018     /* flush instruction cache */
4019     flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
4020 
4021     return tcg_current_code_size(s);
4022 }
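
     /* The negative returns above signal overflow rather than hard failure;
        the caller is expected to recover and retry, roughly (see
        tb_gen_code() for the real policy):

            -1: code_gen_buffer high-water mark exceeded; flush the
                buffer and retranslate.
            -2: TB larger than gen_insn_end_off can represent; retry
                with fewer guest instructions.  */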
4023 
4024 #ifdef CONFIG_PROFILER
4025 void tcg_dump_info(void)
4026 {
4027     TCGProfile prof = {};
4028     const TCGProfile *s;
4029     int64_t tb_count;
4030     int64_t tb_div_count;
4031     int64_t tot;
4032 
4033     tcg_profile_snapshot_counters(&prof);
4034     s = &prof;
4035     tb_count = s->tb_count;
4036     tb_div_count = tb_count ? tb_count : 1;
4037     tot = s->interm_time + s->code_time;
4038 
4039     qemu_printf("JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
4040                 tot, tot / 2.4e9);
4041     qemu_printf("translated TBs      %" PRId64 " (aborted=%" PRId64
4042                 " %0.1f%%)\n",
4043                 tb_count, s->tb_count1 - tb_count,
4044                 (double)(s->tb_count1 - s->tb_count)
4045                 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
4046     qemu_printf("avg ops/TB          %0.1f max=%d\n",
4047                 (double)s->op_count / tb_div_count, s->op_count_max);
4048     qemu_printf("deleted ops/TB      %0.2f\n",
4049                 (double)s->del_op_count / tb_div_count);
4050     qemu_printf("avg temps/TB        %0.2f max=%d\n",
4051                 (double)s->temp_count / tb_div_count, s->temp_count_max);
4052     qemu_printf("avg host code/TB    %0.1f\n",
4053                 (double)s->code_out_len / tb_div_count);
4054     qemu_printf("avg search data/TB  %0.1f\n",
4055                 (double)s->search_out_len / tb_div_count);
4056 
4057     qemu_printf("cycles/op           %0.1f\n",
4058                 s->op_count ? (double)tot / s->op_count : 0);
4059     qemu_printf("cycles/in byte      %0.1f\n",
4060                 s->code_in_len ? (double)tot / s->code_in_len : 0);
4061     qemu_printf("cycles/out byte     %0.1f\n",
4062                 s->code_out_len ? (double)tot / s->code_out_len : 0);
4063     qemu_printf("cycles/search byte  %0.1f\n",
4064                 s->search_out_len ? (double)tot / s->search_out_len : 0);
4065     if (tot == 0) {
4066         tot = 1;
4067     }
4068     qemu_printf("  gen_interm time   %0.1f%%\n",
4069                 (double)s->interm_time / tot * 100.0);
4070     qemu_printf("  gen_code time     %0.1f%%\n",
4071                 (double)s->code_time / tot * 100.0);
4072     qemu_printf("optim./code time    %0.1f%%\n",
4073                 (double)s->opt_time / (s->code_time ? s->code_time : 1)
4074                 * 100.0);
4075     qemu_printf("liveness/code time  %0.1f%%\n",
4076                 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
4077     qemu_printf("cpu_restore count   %" PRId64 "\n",
4078                 s->restore_count);
4079     qemu_printf("  avg cycles        %0.1f\n",
4080                 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
4081 }
4082 #else
4083 void tcg_dump_info(void)
4084 {
4085     qemu_printf("[TCG profiler not compiled]\n");
4086 }
4087 #endif
4088 
4089 #ifdef ELF_HOST_MACHINE
4090 /* In order to use this feature, the backend needs to do three things:
4091 
4092    (1) Define ELF_HOST_MACHINE, both to supply the value placed in
4093        the ELF image and to indicate support for the feature.
4094 
4095    (2) Define tcg_register_jit.  This should create a buffer containing
4096        the contents of a .debug_frame section that describes the post-
4097        prologue unwind info for the tcg machine.
4098 
4099    (3) Call tcg_register_jit_int, with the constructed .debug_frame.
4100 */
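
     /* A condensed sketch of steps (2) and (3); the real versions live in
        each tcg/<host>/tcg-target.inc.c and differ in the CFA rule and the
        callee-saved register list.  The DebugFrame layout here is
        illustrative, not a fixed API:

            static const struct {
                DebugFrameHeader h;
                uint8_t fde_def_cfa[4];    // DW_CFA_def_cfa: sp + frame size
                uint8_t fde_reg_ofs[8];    // DW_CFA_offset for callee-saves
            } debug_frame = { ... };

            void tcg_register_jit(void *buf, size_t buf_size)
            {
                tcg_register_jit_int(buf, buf_size,
                                     &debug_frame, sizeof(debug_frame));
            }
     */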
4101 
4102 /* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
4103 typedef enum {
4104     JIT_NOACTION = 0,
4105     JIT_REGISTER_FN,
4106     JIT_UNREGISTER_FN
4107 } jit_actions_t;
4108 
4109 struct jit_code_entry {
4110     struct jit_code_entry *next_entry;
4111     struct jit_code_entry *prev_entry;
4112     const void *symfile_addr;
4113     uint64_t symfile_size;
4114 };
4115 
4116 struct jit_descriptor {
4117     uint32_t version;
4118     uint32_t action_flag;
4119     struct jit_code_entry *relevant_entry;
4120     struct jit_code_entry *first_entry;
4121 };
4122 
4123 void __jit_debug_register_code(void) __attribute__((noinline));
4124 void __jit_debug_register_code(void)
4125 {
4126     asm("");
4127 }
4128 
4129 /* Must statically initialize the version, because GDB may check
4130    the version before we can set it.  */
4131 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
4132 
4133 /* End GDB interface.  */
4134 
4135 static int find_string(const char *strtab, const char *str)
4136 {
4137     const char *p = strtab + 1;
4138 
4139     while (1) {
4140         if (strcmp(p, str) == 0) {
4141             return p - strtab;
4142         }
4143         p += strlen(p) + 1;
4144     }
4145 }
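
     /* Worked example against the string table built below:
        str = "\0" ".text\0" ".debug_info\0" ..., so find_string(str, ".text")
        returns 1 and find_string(str, ".debug_info") returns 7, i.e. the
        byte offsets stored into sh_name/st_name.  Note the loop never
        terminates if the string is absent; that is acceptable for this
        fixed table.  */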
4146 
4147 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
4148                                  const void *debug_frame,
4149                                  size_t debug_frame_size)
4150 {
4151     struct __attribute__((packed)) DebugInfo {
4152         uint32_t  len;
4153         uint16_t  version;
4154         uint32_t  abbrev;
4155         uint8_t   ptr_size;
4156         uint8_t   cu_die;
4157         uint16_t  cu_lang;
4158         uintptr_t cu_low_pc;
4159         uintptr_t cu_high_pc;
4160         uint8_t   fn_die;
4161         char      fn_name[16];
4162         uintptr_t fn_low_pc;
4163         uintptr_t fn_high_pc;
4164         uint8_t   cu_eoc;
4165     };
4166 
4167     struct ElfImage {
4168         ElfW(Ehdr) ehdr;
4169         ElfW(Phdr) phdr;
4170         ElfW(Shdr) shdr[7];
4171         ElfW(Sym)  sym[2];
4172         struct DebugInfo di;
4173         uint8_t    da[24];
4174         char       str[80];
4175     };
4176 
4177     struct ElfImage *img;
4178 
4179     static const struct ElfImage img_template = {
4180         .ehdr = {
4181             .e_ident[EI_MAG0] = ELFMAG0,
4182             .e_ident[EI_MAG1] = ELFMAG1,
4183             .e_ident[EI_MAG2] = ELFMAG2,
4184             .e_ident[EI_MAG3] = ELFMAG3,
4185             .e_ident[EI_CLASS] = ELF_CLASS,
4186             .e_ident[EI_DATA] = ELF_DATA,
4187             .e_ident[EI_VERSION] = EV_CURRENT,
4188             .e_type = ET_EXEC,
4189             .e_machine = ELF_HOST_MACHINE,
4190             .e_version = EV_CURRENT,
4191             .e_phoff = offsetof(struct ElfImage, phdr),
4192             .e_shoff = offsetof(struct ElfImage, shdr),
4193             .e_ehsize = sizeof(ElfW(Ehdr)),
4194             .e_phentsize = sizeof(ElfW(Phdr)),
4195             .e_phnum = 1,
4196             .e_shentsize = sizeof(ElfW(Shdr)),
4197             .e_shnum = ARRAY_SIZE(img->shdr),
4198             .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
4199 #ifdef ELF_HOST_FLAGS
4200             .e_flags = ELF_HOST_FLAGS,
4201 #endif
4202 #ifdef ELF_OSABI
4203             .e_ident[EI_OSABI] = ELF_OSABI,
4204 #endif
4205         },
4206         .phdr = {
4207             .p_type = PT_LOAD,
4208             .p_flags = PF_X,
4209         },
4210         .shdr = {
4211             [0] = { .sh_type = SHT_NULL },
4212             /* Trick: The contents of code_gen_buffer are not present in
4213                this fake ELF file; that got allocated elsewhere.  Therefore
4214                we mark .text as SHT_NOBITS (similar to .bss) so that readers
4215                will not look for contents.  We can record any address.  */
4216             [1] = { /* .text */
4217                 .sh_type = SHT_NOBITS,
4218                 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
4219             },
4220             [2] = { /* .debug_info */
4221                 .sh_type = SHT_PROGBITS,
4222                 .sh_offset = offsetof(struct ElfImage, di),
4223                 .sh_size = sizeof(struct DebugInfo),
4224             },
4225             [3] = { /* .debug_abbrev */
4226                 .sh_type = SHT_PROGBITS,
4227                 .sh_offset = offsetof(struct ElfImage, da),
4228                 .sh_size = sizeof(img->da),
4229             },
4230             [4] = { /* .debug_frame */
4231                 .sh_type = SHT_PROGBITS,
4232                 .sh_offset = sizeof(struct ElfImage),
4233             },
4234             [5] = { /* .symtab */
4235                 .sh_type = SHT_SYMTAB,
4236                 .sh_offset = offsetof(struct ElfImage, sym),
4237                 .sh_size = sizeof(img->sym),
4238                 .sh_info = 1,
4239                 .sh_link = ARRAY_SIZE(img->shdr) - 1,
4240                 .sh_entsize = sizeof(ElfW(Sym)),
4241             },
4242             [6] = { /* .strtab */
4243                 .sh_type = SHT_STRTAB,
4244                 .sh_offset = offsetof(struct ElfImage, str),
4245                 .sh_size = sizeof(img->str),
4246             }
4247         },
4248         .sym = {
4249             [1] = { /* code_gen_buffer */
4250                 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
4251                 .st_shndx = 1,
4252             }
4253         },
4254         .di = {
4255             .len = sizeof(struct DebugInfo) - 4,
4256             .version = 2,
4257             .ptr_size = sizeof(void *),
4258             .cu_die = 1,
4259             .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
4260             .fn_die = 2,
4261             .fn_name = "code_gen_buffer"
4262         },
4263         .da = {
4264             1,          /* abbrev number (the cu) */
4265             0x11, 1,    /* DW_TAG_compile_unit, has children */
4266             0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
4267             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
4268             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
4269             0, 0,       /* end of abbrev */
4270             2,          /* abbrev number (the fn) */
4271             0x2e, 0,    /* DW_TAG_subprogram, no children */
4272             0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
4273             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
4274             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
4275             0, 0,       /* end of abbrev */
4276             0           /* no more abbrev */
4277         },
4278         .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
4279                ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
4280     };
4281 
4282     /* We only need a single jit entry; statically allocate it.  */
4283     static struct jit_code_entry one_entry;
4284 
4285     uintptr_t buf = (uintptr_t)buf_ptr;
4286     size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
4287     DebugFrameHeader *dfh;
4288 
4289     img = g_malloc(img_size);
4290     *img = img_template;
4291 
4292     img->phdr.p_vaddr = buf;
4293     img->phdr.p_paddr = buf;
4294     img->phdr.p_memsz = buf_size;
4295 
4296     img->shdr[1].sh_name = find_string(img->str, ".text");
4297     img->shdr[1].sh_addr = buf;
4298     img->shdr[1].sh_size = buf_size;
4299 
4300     img->shdr[2].sh_name = find_string(img->str, ".debug_info");
4301     img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
4302 
4303     img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
4304     img->shdr[4].sh_size = debug_frame_size;
4305 
4306     img->shdr[5].sh_name = find_string(img->str, ".symtab");
4307     img->shdr[6].sh_name = find_string(img->str, ".strtab");
4308 
4309     img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
4310     img->sym[1].st_value = buf;
4311     img->sym[1].st_size = buf_size;
4312 
4313     img->di.cu_low_pc = buf;
4314     img->di.cu_high_pc = buf + buf_size;
4315     img->di.fn_low_pc = buf;
4316     img->di.fn_high_pc = buf + buf_size;
4317 
4318     dfh = (DebugFrameHeader *)(img + 1);
4319     memcpy(dfh, debug_frame, debug_frame_size);
4320     dfh->fde.func_start = buf;
4321     dfh->fde.func_len = buf_size;
4322 
4323 #ifdef DEBUG_JIT
4324     /* Enable this block to be able to debug the ELF image file creation.
4325        One can use readelf, objdump, or other inspection utilities.  */
4326     {
4327         FILE *f = fopen("/tmp/qemu.jit", "w+b");
4328         if (f) {
4329             if (fwrite(img, img_size, 1, f) != 1) {
4330                 /* Avoid stupid unused return value warning for fwrite.  */
4331             }
4332             fclose(f);
4333         }
4334     }
4335 #endif
4336 
4337     one_entry.symfile_addr = img;
4338     one_entry.symfile_size = img_size;
4339 
4340     __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
4341     __jit_debug_descriptor.relevant_entry = &one_entry;
4342     __jit_debug_descriptor.first_entry = &one_entry;
4343     __jit_debug_register_code();
4344 }
4345 #else
4346 /* No support for the feature.  Provide the entry point expected by exec.c,
4347    and implement the internal function we declared earlier.  */
4348 
4349 static void tcg_register_jit_int(void *buf, size_t size,
4350                                  const void *debug_frame,
4351                                  size_t debug_frame_size)
4352 {
4353 }
4354 
4355 void tcg_register_jit(void *buf, size_t buf_size)
4356 {
4357 }
4358 #endif /* ELF_HOST_MACHINE */
4359 
4360 #if !TCG_TARGET_MAYBE_vec
4361 void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
4362 {
4363     g_assert_not_reached();
4364 }
4365 #endif
4366