xref: /qemu/tcg/tcg.c (revision 4f2d31fb)
1 /*
2  * Tiny Code Generator for QEMU
3  *
4  * Copyright (c) 2008 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 /* define it to use liveness analysis (better code) */
26 #define USE_LIVENESS_ANALYSIS
27 #define USE_TCG_OPTIMIZATIONS
28 
29 #include "config.h"
30 
31 /* Define to dump the ELF file used to communicate with GDB.  */
32 #undef DEBUG_JIT
33 
34 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
35 /* define it to suppress various consistency checks (faster) */
36 #define NDEBUG
37 #endif
38 
39 #include "qemu-common.h"
40 #include "qemu/host-utils.h"
41 #include "qemu/timer.h"
42 
43 /* Note: the long-term plan is to reduce the dependencies on the QEMU
44    CPU definitions. Currently they are used for the qemu_ld/st
45    instructions.  */
46 #define NO_CPU_IO_DEFS
47 #include "cpu.h"
48 
49 #include "tcg-op.h"
50 
51 #if UINTPTR_MAX == UINT32_MAX
52 # define ELF_CLASS  ELFCLASS32
53 #else
54 # define ELF_CLASS  ELFCLASS64
55 #endif
56 #ifdef HOST_WORDS_BIGENDIAN
57 # define ELF_DATA   ELFDATA2MSB
58 #else
59 # define ELF_DATA   ELFDATA2LSB
60 #endif
61 
62 #include "elf.h"
63 
64 /* Forward declarations for functions declared in tcg-target.c and used here. */
65 static void tcg_target_init(TCGContext *s);
66 static void tcg_target_qemu_prologue(TCGContext *s);
67 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
68                         intptr_t value, intptr_t addend);
69 
70 /* The CIE and FDE header definitions will be common to all hosts.  */
71 typedef struct {
72     uint32_t len __attribute__((aligned((sizeof(void *)))));
73     uint32_t id;
74     uint8_t version;
75     char augmentation[1];
76     uint8_t code_align;
77     uint8_t data_align;
78     uint8_t return_column;
79 } DebugFrameCIE;
80 
81 typedef struct QEMU_PACKED {
82     uint32_t len __attribute__((aligned((sizeof(void *)))));
83     uint32_t cie_offset;
84     uintptr_t func_start;
85     uintptr_t func_len;
86 } DebugFrameFDEHeader;
87 
88 typedef struct QEMU_PACKED {
89     DebugFrameCIE cie;
90     DebugFrameFDEHeader fde;
91 } DebugFrameHeader;
92 
93 static void tcg_register_jit_int(void *buf, size_t size,
94                                  const void *debug_frame,
95                                  size_t debug_frame_size)
96     __attribute__((unused));
97 
98 /* Forward declarations for functions declared and used in tcg-target.c. */
99 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
100 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
101                        intptr_t arg2);
102 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
103 static void tcg_out_movi(TCGContext *s, TCGType type,
104                          TCGReg ret, tcg_target_long arg);
105 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
106                        const int *const_args);
107 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
108                        intptr_t arg2);
109 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
110 static int tcg_target_const_match(tcg_target_long val, TCGType type,
111                                   const TCGArgConstraint *arg_ct);
112 static void tcg_out_tb_init(TCGContext *s);
113 static void tcg_out_tb_finalize(TCGContext *s);
114 
115 
116 
117 static TCGRegSet tcg_target_available_regs[2];
118 static TCGRegSet tcg_target_call_clobber_regs;
119 
120 #if TCG_TARGET_INSN_UNIT_SIZE == 1
121 static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
122 {
123     *s->code_ptr++ = v;
124 }
125 
126 static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
127                                                       uint8_t v)
128 {
129     *p = v;
130 }
131 #endif
132 
133 #if TCG_TARGET_INSN_UNIT_SIZE <= 2
134 static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
135 {
136     if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
137         *s->code_ptr++ = v;
138     } else {
139         tcg_insn_unit *p = s->code_ptr;
140         memcpy(p, &v, sizeof(v));
141         s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
142     }
143 }
144 
145 static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
146                                                        uint16_t v)
147 {
148     if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
149         *p = v;
150     } else {
151         memcpy(p, &v, sizeof(v));
152     }
153 }
154 #endif
155 
156 #if TCG_TARGET_INSN_UNIT_SIZE <= 4
157 static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
158 {
159     if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
160         *s->code_ptr++ = v;
161     } else {
162         tcg_insn_unit *p = s->code_ptr;
163         memcpy(p, &v, sizeof(v));
164         s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
165     }
166 }
167 
168 static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
169                                                        uint32_t v)
170 {
171     if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
172         *p = v;
173     } else {
174         memcpy(p, &v, sizeof(v));
175     }
176 }
177 #endif
178 
179 #if TCG_TARGET_INSN_UNIT_SIZE <= 8
180 static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
181 {
182     if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
183         *s->code_ptr++ = v;
184     } else {
185         tcg_insn_unit *p = s->code_ptr;
186         memcpy(p, &v, sizeof(v));
187         s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
188     }
189 }
190 
191 static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
192                                                        uint64_t v)
193 {
194     if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
195         *p = v;
196     } else {
197         memcpy(p, &v, sizeof(v));
198     }
199 }
200 #endif
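/* A worked example of the emit helpers above (illustrative only): on a
   host with TCG_TARGET_INSN_UNIT_SIZE == 1, a call such as

       tcg_out32(s, 0x12345678);

   takes the memcpy branch and advances s->code_ptr by
   4 / TCG_TARGET_INSN_UNIT_SIZE == 4 units, whereas with 4-byte insn
   units the value is stored as a single unit.  The tcg_patch* variants
   rewrite an already emitted unit in place and do not move code_ptr.  */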
201 
202 /* label relocation processing */
203 
204 static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
205                           TCGLabel *l, intptr_t addend)
206 {
207     TCGRelocation *r;
208 
209     if (l->has_value) {
210         /* FIXME: This may break relocations on RISC targets that
211            modify instruction fields in place.  The caller may not have
212            written the initial value.  */
213         patch_reloc(code_ptr, type, l->u.value, addend);
214     } else {
215         /* add a new relocation entry */
216         r = tcg_malloc(sizeof(TCGRelocation));
217         r->type = type;
218         r->ptr = code_ptr;
219         r->addend = addend;
220         r->next = l->u.first_reloc;
221         l->u.first_reloc = r;
222     }
223 }
224 
225 static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
226 {
227     intptr_t value = (intptr_t)ptr;
228     TCGRelocation *r;
229 
230     assert(!l->has_value);
231 
232     for (r = l->u.first_reloc; r != NULL; r = r->next) {
233         patch_reloc(r->ptr, r->type, value, r->addend);
234     }
235 
236     l->has_value = 1;
237     l->u.value_ptr = ptr;
238 }
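/* Sketch of how the two routines above are meant to be used by a backend;
   the branch-emitting helpers named here are hypothetical, only
   tcg_out_reloc() and the TCGLabel fields come from this file:

       if (l->has_value) {
           tcg_out_branch_to(s, l->u.value_ptr);               // hypothetical
       } else {
           tcg_out_reloc(s, s->code_ptr, R_EXAMPLE_REL, l, 0); // reloc type illustrative
           tcg_out_branch_placeholder(s);                      // hypothetical
       }

   Once the label is bound, tcg_out_label() patches every queued site via
   patch_reloc().  */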
239 
240 TCGLabel *gen_new_label(void)
241 {
242     TCGContext *s = &tcg_ctx;
243     TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
244 
245     *l = (TCGLabel){
246         .id = s->nb_labels++
247     };
248 
249     return l;
250 }
251 
252 #include "tcg-target.c"
253 
254 /* pool-based memory allocation */
255 void *tcg_malloc_internal(TCGContext *s, int size)
256 {
257     TCGPool *p;
258     int pool_size;
259 
260     if (size > TCG_POOL_CHUNK_SIZE) {
261         /* big malloc: insert a new pool (XXX: could optimize) */
262         p = g_malloc(sizeof(TCGPool) + size);
263         p->size = size;
264         p->next = s->pool_first_large;
265         s->pool_first_large = p;
266         return p->data;
267     } else {
268         p = s->pool_current;
269         if (!p) {
270             p = s->pool_first;
271             if (!p)
272                 goto new_pool;
273         } else {
274             if (!p->next) {
275             new_pool:
276                 pool_size = TCG_POOL_CHUNK_SIZE;
277                 p = g_malloc(sizeof(TCGPool) + pool_size);
278                 p->size = pool_size;
279                 p->next = NULL;
280                 if (s->pool_current)
281                     s->pool_current->next = p;
282                 else
283                     s->pool_first = p;
284             } else {
285                 p = p->next;
286             }
287         }
288     }
289     s->pool_current = p;
290     s->pool_cur = p->data + size;
291     s->pool_end = p->data + p->size;
292     return p->data;
293 }
294 
295 void tcg_pool_reset(TCGContext *s)
296 {
297     TCGPool *p, *t;
298     for (p = s->pool_first_large; p; p = t) {
299         t = p->next;
300         g_free(p);
301     }
302     s->pool_first_large = NULL;
303     s->pool_cur = s->pool_end = NULL;
304     s->pool_current = NULL;
305 }
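/* Usage sketch for the pool allocator above: tcg_malloc() (declared in
   tcg.h) carves storage out of reusable chunks, and everything it returns
   is implicitly released by the next tcg_pool_reset(), which
   tcg_func_start() performs before each translation.  Only the oversized
   allocations kept on pool_first_large are actually freed; the regular
   chunks are retained for reuse.  For example, the TCGRelocation records
   allocated in tcg_out_reloc() above never need an explicit free.  */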
306 
307 typedef struct TCGHelperInfo {
308     void *func;
309     const char *name;
310     unsigned flags;
311     unsigned sizemask;
312 } TCGHelperInfo;
313 
314 #include "exec/helper-proto.h"
315 
316 static const TCGHelperInfo all_helpers[] = {
317 #include "exec/helper-tcg.h"
318 };
319 
320 void tcg_context_init(TCGContext *s)
321 {
322     int op, total_args, n, i;
323     TCGOpDef *def;
324     TCGArgConstraint *args_ct;
325     int *sorted_args;
326     GHashTable *helper_table;
327 
328     memset(s, 0, sizeof(*s));
329     s->nb_globals = 0;
330 
331     /* Count total number of arguments and allocate the corresponding
332        space */
333     total_args = 0;
334     for(op = 0; op < NB_OPS; op++) {
335         def = &tcg_op_defs[op];
336         n = def->nb_iargs + def->nb_oargs;
337         total_args += n;
338     }
339 
340     args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
341     sorted_args = g_malloc(sizeof(int) * total_args);
342 
343     for(op = 0; op < NB_OPS; op++) {
344         def = &tcg_op_defs[op];
345         def->args_ct = args_ct;
346         def->sorted_args = sorted_args;
347         n = def->nb_iargs + def->nb_oargs;
348         sorted_args += n;
349         args_ct += n;
350     }
351 
352     /* Register helpers.  */
353     /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
354     s->helpers = helper_table = g_hash_table_new(NULL, NULL);
355 
356     for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
357         g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
358                             (gpointer)&all_helpers[i]);
359     }
360 
361     tcg_target_init(s);
362 }
363 
364 void tcg_prologue_init(TCGContext *s)
365 {
366     size_t prologue_size, total_size;
367     void *buf0, *buf1;
368 
369     /* Put the prologue at the beginning of code_gen_buffer.  */
370     buf0 = s->code_gen_buffer;
371     s->code_ptr = buf0;
372     s->code_buf = buf0;
373     s->code_gen_prologue = buf0;
374 
375     /* Generate the prologue.  */
376     tcg_target_qemu_prologue(s);
377     buf1 = s->code_ptr;
378     flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
379 
380     /* Deduct the prologue from the buffer.  */
381     prologue_size = tcg_current_code_size(s);
382     s->code_gen_ptr = buf1;
383     s->code_gen_buffer = buf1;
384     s->code_buf = buf1;
385     total_size = s->code_gen_buffer_size - prologue_size;
386     s->code_gen_buffer_size = total_size;
387 
388     /* Compute a high-water mark, at which we voluntarily flush the buffer
389        and start over.  The size here is arbitrary, significantly larger
390        than we expect the code generation for any one opcode to require.  */
391     s->code_gen_highwater = s->code_gen_buffer + (total_size - 1024);
392 
393     tcg_register_jit(s->code_gen_buffer, total_size);
394 
395 #ifdef DEBUG_DISAS
396     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
397         qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
398         log_disas(buf0, prologue_size);
399         qemu_log("\n");
400         qemu_log_flush();
401     }
402 #endif
403 }
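/* Minimal sketch of how the high-water mark set above is meant to be
   consumed; the actual check lives in the code generation loop, outside
   this excerpt:

       if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
           return -1;   // caller flushes the translation cache and retries
       }

   The 1024-byte margin is what lets the op currently being emitted finish
   before the overflow is noticed.  */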
404 
405 void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size)
406 {
407     s->frame_start = start;
408     s->frame_end = start + size;
409     s->frame_reg = reg;
410 }
411 
412 void tcg_func_start(TCGContext *s)
413 {
414     tcg_pool_reset(s);
415     s->nb_temps = s->nb_globals;
416 
417     /* No temps have been previously allocated for size or locality.  */
418     memset(s->free_temps, 0, sizeof(s->free_temps));
419 
420     s->nb_labels = 0;
421     s->current_frame_offset = s->frame_start;
422 
423 #ifdef CONFIG_DEBUG_TCG
424     s->goto_tb_issue_mask = 0;
425 #endif
426 
427     s->gen_first_op_idx = 0;
428     s->gen_last_op_idx = -1;
429     s->gen_next_op_idx = 0;
430     s->gen_next_parm_idx = 0;
431 
432     s->be = tcg_malloc(sizeof(TCGBackendData));
433 }
434 
435 static inline void tcg_temp_alloc(TCGContext *s, int n)
436 {
437     if (n > TCG_MAX_TEMPS)
438         tcg_abort();
439 }
440 
441 static inline int tcg_global_reg_new_internal(TCGType type, int reg,
442                                               const char *name)
443 {
444     TCGContext *s = &tcg_ctx;
445     TCGTemp *ts;
446     int idx;
447 
448 #if TCG_TARGET_REG_BITS == 32
449     if (type != TCG_TYPE_I32)
450         tcg_abort();
451 #endif
452     if (tcg_regset_test_reg(s->reserved_regs, reg))
453         tcg_abort();
454     idx = s->nb_globals;
455     tcg_temp_alloc(s, s->nb_globals + 1);
456     ts = &s->temps[s->nb_globals];
457     ts->base_type = type;
458     ts->type = type;
459     ts->fixed_reg = 1;
460     ts->reg = reg;
461     ts->name = name;
462     s->nb_globals++;
463     tcg_regset_set_reg(s->reserved_regs, reg);
464     return idx;
465 }
466 
467 TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name)
468 {
469     int idx;
470 
471     idx = tcg_global_reg_new_internal(TCG_TYPE_I32, reg, name);
472     return MAKE_TCGV_I32(idx);
473 }
474 
475 TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name)
476 {
477     int idx;
478 
479     idx = tcg_global_reg_new_internal(TCG_TYPE_I64, reg, name);
480     return MAKE_TCGV_I64(idx);
481 }
482 
483 static inline int tcg_global_mem_new_internal(TCGType type, int reg,
484                                               intptr_t offset,
485                                               const char *name)
486 {
487     TCGContext *s = &tcg_ctx;
488     TCGTemp *ts;
489     int idx;
490 
491     idx = s->nb_globals;
492 #if TCG_TARGET_REG_BITS == 32
493     if (type == TCG_TYPE_I64) {
494         char buf[64];
495         tcg_temp_alloc(s, s->nb_globals + 2);
496         ts = &s->temps[s->nb_globals];
497         ts->base_type = type;
498         ts->type = TCG_TYPE_I32;
499         ts->fixed_reg = 0;
500         ts->mem_allocated = 1;
501         ts->mem_reg = reg;
502 #ifdef HOST_WORDS_BIGENDIAN
503         ts->mem_offset = offset + 4;
504 #else
505         ts->mem_offset = offset;
506 #endif
507         pstrcpy(buf, sizeof(buf), name);
508         pstrcat(buf, sizeof(buf), "_0");
509         ts->name = strdup(buf);
510         ts++;
511 
512         ts->base_type = type;
513         ts->type = TCG_TYPE_I32;
514         ts->fixed_reg = 0;
515         ts->mem_allocated = 1;
516         ts->mem_reg = reg;
517 #ifdef HOST_WORDS_BIGENDIAN
518         ts->mem_offset = offset;
519 #else
520         ts->mem_offset = offset + 4;
521 #endif
522         pstrcpy(buf, sizeof(buf), name);
523         pstrcat(buf, sizeof(buf), "_1");
524         ts->name = strdup(buf);
525 
526         s->nb_globals += 2;
527     } else
528 #endif
529     {
530         tcg_temp_alloc(s, s->nb_globals + 1);
531         ts = &s->temps[s->nb_globals];
532         ts->base_type = type;
533         ts->type = type;
534         ts->fixed_reg = 0;
535         ts->mem_allocated = 1;
536         ts->mem_reg = reg;
537         ts->mem_offset = offset;
538         ts->name = name;
539         s->nb_globals++;
540     }
541     return idx;
542 }
543 
544 TCGv_i32 tcg_global_mem_new_i32(int reg, intptr_t offset, const char *name)
545 {
546     int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
547     return MAKE_TCGV_I32(idx);
548 }
549 
550 TCGv_i64 tcg_global_mem_new_i64(int reg, intptr_t offset, const char *name)
551 {
552     int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
553     return MAKE_TCGV_I64(idx);
554 }
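/* Illustrative front-end use of the constructors above; the CPU state type
   and field are hypothetical:

       TCGv_i32 cpu_foo = tcg_global_mem_new_i32(TCG_AREG0,
                                                 offsetof(CPUFooState, foo),
                                                 "foo");

   On a 32-bit host the I64 variant instead creates two I32 halves named
   "name_0"/"name_1", with the low half placed at whichever offset holds
   the low word for the host endianness, as handled above.  */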
555 
556 static inline int tcg_temp_new_internal(TCGType type, int temp_local)
557 {
558     TCGContext *s = &tcg_ctx;
559     TCGTemp *ts;
560     int idx, k;
561 
562     k = type + (temp_local ? TCG_TYPE_COUNT : 0);
563     idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
564     if (idx < TCG_MAX_TEMPS) {
565         /* There is already an available temp with the right type.  */
566         clear_bit(idx, s->free_temps[k].l);
567 
568         ts = &s->temps[idx];
569         ts->temp_allocated = 1;
570         assert(ts->base_type == type);
571         assert(ts->temp_local == temp_local);
572     } else {
573         idx = s->nb_temps;
574 #if TCG_TARGET_REG_BITS == 32
575         if (type == TCG_TYPE_I64) {
576             tcg_temp_alloc(s, s->nb_temps + 2);
577             ts = &s->temps[s->nb_temps];
578             ts->base_type = type;
579             ts->type = TCG_TYPE_I32;
580             ts->temp_allocated = 1;
581             ts->temp_local = temp_local;
582             ts->name = NULL;
583             ts++;
584             ts->base_type = type;
585             ts->type = TCG_TYPE_I32;
586             ts->temp_allocated = 1;
587             ts->temp_local = temp_local;
588             ts->name = NULL;
589             s->nb_temps += 2;
590         } else
591 #endif
592         {
593             tcg_temp_alloc(s, s->nb_temps + 1);
594             ts = &s->temps[s->nb_temps];
595             ts->base_type = type;
596             ts->type = type;
597             ts->temp_allocated = 1;
598             ts->temp_local = temp_local;
599             ts->name = NULL;
600             s->nb_temps++;
601         }
602     }
603 
604 #if defined(CONFIG_DEBUG_TCG)
605     s->temps_in_use++;
606 #endif
607     return idx;
608 }
609 
610 TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
611 {
612     int idx;
613 
614     idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
615     return MAKE_TCGV_I32(idx);
616 }
617 
618 TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
619 {
620     int idx;
621 
622     idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
623     return MAKE_TCGV_I64(idx);
624 }
625 
626 static void tcg_temp_free_internal(int idx)
627 {
628     TCGContext *s = &tcg_ctx;
629     TCGTemp *ts;
630     int k;
631 
632 #if defined(CONFIG_DEBUG_TCG)
633     s->temps_in_use--;
634     if (s->temps_in_use < 0) {
635         fprintf(stderr, "More temporaries freed than allocated!\n");
636     }
637 #endif
638 
639     assert(idx >= s->nb_globals && idx < s->nb_temps);
640     ts = &s->temps[idx];
641     assert(ts->temp_allocated != 0);
642     ts->temp_allocated = 0;
643 
644     k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
645     set_bit(idx, s->free_temps[k].l);
646 }
647 
648 void tcg_temp_free_i32(TCGv_i32 arg)
649 {
650     tcg_temp_free_internal(GET_TCGV_I32(arg));
651 }
652 
653 void tcg_temp_free_i64(TCGv_i64 arg)
654 {
655     tcg_temp_free_internal(GET_TCGV_I64(arg));
656 }
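/* Illustrative temp lifecycle for the helpers above: a front end normally
   allocates and releases a temporary within a single translated guest
   instruction (the second operand below is hypothetical):

       TCGv_i32 t = tcg_temp_new_i32();
       tcg_gen_mov_i32(t, some_other_i32);
       ...
       tcg_temp_free_i32(t);

   Freeing only sets the matching bit in free_temps so the slot can be
   recycled by tcg_temp_new_internal(); no storage is released until the
   context itself is reset.  */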
657 
658 TCGv_i32 tcg_const_i32(int32_t val)
659 {
660     TCGv_i32 t0;
661     t0 = tcg_temp_new_i32();
662     tcg_gen_movi_i32(t0, val);
663     return t0;
664 }
665 
666 TCGv_i64 tcg_const_i64(int64_t val)
667 {
668     TCGv_i64 t0;
669     t0 = tcg_temp_new_i64();
670     tcg_gen_movi_i64(t0, val);
671     return t0;
672 }
673 
674 TCGv_i32 tcg_const_local_i32(int32_t val)
675 {
676     TCGv_i32 t0;
677     t0 = tcg_temp_local_new_i32();
678     tcg_gen_movi_i32(t0, val);
679     return t0;
680 }
681 
682 TCGv_i64 tcg_const_local_i64(int64_t val)
683 {
684     TCGv_i64 t0;
685     t0 = tcg_temp_local_new_i64();
686     tcg_gen_movi_i64(t0, val);
687     return t0;
688 }
689 
690 #if defined(CONFIG_DEBUG_TCG)
691 void tcg_clear_temp_count(void)
692 {
693     TCGContext *s = &tcg_ctx;
694     s->temps_in_use = 0;
695 }
696 
697 int tcg_check_temp_count(void)
698 {
699     TCGContext *s = &tcg_ctx;
700     if (s->temps_in_use) {
701         /* Clear the count so that we don't give another
702          * warning immediately next time around.
703          */
704         s->temps_in_use = 0;
705         return 1;
706     }
707     return 0;
708 }
709 #endif
710 
711 /* Note: we convert the 64-bit args to 32-bit and do some alignment
712    and endian swapping. Maybe it would be better to do the alignment
713    and endian swap in tcg_reg_alloc_call(). */
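/* As consumed below: bit 0 of sizemask is set when the return value is
   64-bit, and for argument i the bit at position (i+1)*2 means "64-bit"
   while the bit at (i+1)*2 + 1 means "sign-extended".  Worked example: a
   helper returning an i32 and taking (i64, unsigned i32) arguments has
   bit 2 set and bits 0, 4 and 5 clear.  */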
714 void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
715                    int nargs, TCGArg *args)
716 {
717     int i, real_args, nb_rets, pi, pi_first;
718     unsigned sizemask, flags;
719     TCGHelperInfo *info;
720 
721     info = g_hash_table_lookup(s->helpers, (gpointer)func);
722     flags = info->flags;
723     sizemask = info->sizemask;
724 
725 #if defined(__sparc__) && !defined(__arch64__) \
726     && !defined(CONFIG_TCG_INTERPRETER)
727     /* We have 64-bit values in one register, but need to pass as two
728        separate parameters.  Split them.  */
729     int orig_sizemask = sizemask;
730     int orig_nargs = nargs;
731     TCGv_i64 retl, reth;
732 
733     TCGV_UNUSED_I64(retl);
734     TCGV_UNUSED_I64(reth);
735     if (sizemask != 0) {
736         TCGArg *split_args = __builtin_alloca(sizeof(TCGArg) * nargs * 2);
737         for (i = real_args = 0; i < nargs; ++i) {
738             int is_64bit = sizemask & (1 << (i+1)*2);
739             if (is_64bit) {
740                 TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
741                 TCGv_i32 h = tcg_temp_new_i32();
742                 TCGv_i32 l = tcg_temp_new_i32();
743                 tcg_gen_extr_i64_i32(l, h, orig);
744                 split_args[real_args++] = GET_TCGV_I32(h);
745                 split_args[real_args++] = GET_TCGV_I32(l);
746             } else {
747                 split_args[real_args++] = args[i];
748             }
749         }
750         nargs = real_args;
751         args = split_args;
752         sizemask = 0;
753     }
754 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
755     for (i = 0; i < nargs; ++i) {
756         int is_64bit = sizemask & (1 << (i+1)*2);
757         int is_signed = sizemask & (2 << (i+1)*2);
758         if (!is_64bit) {
759             TCGv_i64 temp = tcg_temp_new_i64();
760             TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
761             if (is_signed) {
762                 tcg_gen_ext32s_i64(temp, orig);
763             } else {
764                 tcg_gen_ext32u_i64(temp, orig);
765             }
766             args[i] = GET_TCGV_I64(temp);
767         }
768     }
769 #endif /* TCG_TARGET_EXTEND_ARGS */
770 
771     pi_first = pi = s->gen_next_parm_idx;
772     if (ret != TCG_CALL_DUMMY_ARG) {
773 #if defined(__sparc__) && !defined(__arch64__) \
774     && !defined(CONFIG_TCG_INTERPRETER)
775         if (orig_sizemask & 1) {
776             /* The 32-bit ABI is going to return the 64-bit value in
777                the %o0/%o1 register pair.  Prepare for this by using
778                two return temporaries, and reassemble below.  */
779             retl = tcg_temp_new_i64();
780             reth = tcg_temp_new_i64();
781             s->gen_opparam_buf[pi++] = GET_TCGV_I64(reth);
782             s->gen_opparam_buf[pi++] = GET_TCGV_I64(retl);
783             nb_rets = 2;
784         } else {
785             s->gen_opparam_buf[pi++] = ret;
786             nb_rets = 1;
787         }
788 #else
789         if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
790 #ifdef HOST_WORDS_BIGENDIAN
791             s->gen_opparam_buf[pi++] = ret + 1;
792             s->gen_opparam_buf[pi++] = ret;
793 #else
794             s->gen_opparam_buf[pi++] = ret;
795             s->gen_opparam_buf[pi++] = ret + 1;
796 #endif
797             nb_rets = 2;
798         } else {
799             s->gen_opparam_buf[pi++] = ret;
800             nb_rets = 1;
801         }
802 #endif
803     } else {
804         nb_rets = 0;
805     }
806     real_args = 0;
807     for (i = 0; i < nargs; i++) {
808         int is_64bit = sizemask & (1 << (i+1)*2);
809         if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
810 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
811             /* some targets want aligned 64 bit args */
812             if (real_args & 1) {
813                 s->gen_opparam_buf[pi++] = TCG_CALL_DUMMY_ARG;
814                 real_args++;
815             }
816 #endif
817             /* If the stack grows up, then we will be placing successive
818                arguments at lower addresses, which means we need to
819                reverse the order compared to how we would normally
820                treat either big- or little-endian.  For those arguments
821                that will wind up in registers, this still works for
822                HPPA (the only current STACK_GROWSUP target) since the
823                argument registers are *also* allocated in decreasing
824                order.  If another such target is added, this logic may
825                have to get more complicated to differentiate between
826                stack arguments and register arguments.  */
827 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
828             s->gen_opparam_buf[pi++] = args[i] + 1;
829             s->gen_opparam_buf[pi++] = args[i];
830 #else
831             s->gen_opparam_buf[pi++] = args[i];
832             s->gen_opparam_buf[pi++] = args[i] + 1;
833 #endif
834             real_args += 2;
835             continue;
836         }
837 
838         s->gen_opparam_buf[pi++] = args[i];
839         real_args++;
840     }
841     s->gen_opparam_buf[pi++] = (uintptr_t)func;
842     s->gen_opparam_buf[pi++] = flags;
843 
844     i = s->gen_next_op_idx;
845     tcg_debug_assert(i < OPC_BUF_SIZE);
846     tcg_debug_assert(pi <= OPPARAM_BUF_SIZE);
847 
848     /* Set links for sequential allocation during translation.  */
849     s->gen_op_buf[i] = (TCGOp){
850         .opc = INDEX_op_call,
851         .callo = nb_rets,
852         .calli = real_args,
853         .args = pi_first,
854         .prev = i - 1,
855         .next = i + 1
856     };
857 
858     /* Make sure the calli field didn't overflow.  */
859     tcg_debug_assert(s->gen_op_buf[i].calli == real_args);
860 
861     s->gen_last_op_idx = i;
862     s->gen_next_op_idx = i + 1;
863     s->gen_next_parm_idx = pi;
864 
865 #if defined(__sparc__) && !defined(__arch64__) \
866     && !defined(CONFIG_TCG_INTERPRETER)
867     /* Free all of the parts we allocated above.  */
868     for (i = real_args = 0; i < orig_nargs; ++i) {
869         int is_64bit = orig_sizemask & (1 << (i+1)*2);
870         if (is_64bit) {
871             TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]);
872             TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]);
873             tcg_temp_free_i32(h);
874             tcg_temp_free_i32(l);
875         } else {
876             real_args++;
877         }
878     }
879     if (orig_sizemask & 1) {
880         /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
881            Note that describing these as TCGv_i64 eliminates an unnecessary
882            zero-extension that tcg_gen_concat_i32_i64 would create.  */
883         tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth);
884         tcg_temp_free_i64(retl);
885         tcg_temp_free_i64(reth);
886     }
887 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
888     for (i = 0; i < nargs; ++i) {
889         int is_64bit = sizemask & (1 << (i+1)*2);
890         if (!is_64bit) {
891             TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
892             tcg_temp_free_i64(temp);
893         }
894     }
895 #endif /* TCG_TARGET_EXTEND_ARGS */
896 }
897 
898 static void tcg_reg_alloc_start(TCGContext *s)
899 {
900     int i;
901     TCGTemp *ts;
902     for(i = 0; i < s->nb_globals; i++) {
903         ts = &s->temps[i];
904         if (ts->fixed_reg) {
905             ts->val_type = TEMP_VAL_REG;
906         } else {
907             ts->val_type = TEMP_VAL_MEM;
908         }
909     }
910     for(i = s->nb_globals; i < s->nb_temps; i++) {
911         ts = &s->temps[i];
912         if (ts->temp_local) {
913             ts->val_type = TEMP_VAL_MEM;
914         } else {
915             ts->val_type = TEMP_VAL_DEAD;
916         }
917         ts->mem_allocated = 0;
918         ts->fixed_reg = 0;
919     }
920     for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
921         s->reg_to_temp[i] = -1;
922     }
923 }
924 
925 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size,
926                                  int idx)
927 {
928     TCGTemp *ts;
929 
930     assert(idx >= 0 && idx < s->nb_temps);
931     ts = &s->temps[idx];
932     if (idx < s->nb_globals) {
933         pstrcpy(buf, buf_size, ts->name);
934     } else {
935         if (ts->temp_local)
936             snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
937         else
938             snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
939     }
940     return buf;
941 }
942 
943 char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg)
944 {
945     return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg));
946 }
947 
948 char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg)
949 {
950     return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg));
951 }
952 
953 /* Find helper name.  */
954 static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
955 {
956     const char *ret = NULL;
957     if (s->helpers) {
958         TCGHelperInfo *info = g_hash_table_lookup(s->helpers, (gpointer)val);
959         if (info) {
960             ret = info->name;
961         }
962     }
963     return ret;
964 }
965 
966 static const char * const cond_name[] =
967 {
968     [TCG_COND_NEVER] = "never",
969     [TCG_COND_ALWAYS] = "always",
970     [TCG_COND_EQ] = "eq",
971     [TCG_COND_NE] = "ne",
972     [TCG_COND_LT] = "lt",
973     [TCG_COND_GE] = "ge",
974     [TCG_COND_LE] = "le",
975     [TCG_COND_GT] = "gt",
976     [TCG_COND_LTU] = "ltu",
977     [TCG_COND_GEU] = "geu",
978     [TCG_COND_LEU] = "leu",
979     [TCG_COND_GTU] = "gtu"
980 };
981 
982 static const char * const ldst_name[] =
983 {
984     [MO_UB]   = "ub",
985     [MO_SB]   = "sb",
986     [MO_LEUW] = "leuw",
987     [MO_LESW] = "lesw",
988     [MO_LEUL] = "leul",
989     [MO_LESL] = "lesl",
990     [MO_LEQ]  = "leq",
991     [MO_BEUW] = "beuw",
992     [MO_BESW] = "besw",
993     [MO_BEUL] = "beul",
994     [MO_BESL] = "besl",
995     [MO_BEQ]  = "beq",
996 };
997 
998 void tcg_dump_ops(TCGContext *s)
999 {
1000     char buf[128];
1001     TCGOp *op;
1002     int oi;
1003 
1004     for (oi = s->gen_first_op_idx; oi >= 0; oi = op->next) {
1005         int i, k, nb_oargs, nb_iargs, nb_cargs;
1006         const TCGOpDef *def;
1007         const TCGArg *args;
1008         TCGOpcode c;
1009 
1010         op = &s->gen_op_buf[oi];
1011         c = op->opc;
1012         def = &tcg_op_defs[c];
1013         args = &s->gen_opparam_buf[op->args];
1014 
1015         if (c == INDEX_op_insn_start) {
1016             qemu_log("%s ----", oi != s->gen_first_op_idx ? "\n" : "");
1017 
1018             for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1019                 target_ulong a;
1020 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
1021                 a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
1022 #else
1023                 a = args[i];
1024 #endif
1025                 qemu_log(" " TARGET_FMT_lx, a);
1026             }
1027         } else if (c == INDEX_op_call) {
1028             /* variable number of arguments */
1029             nb_oargs = op->callo;
1030             nb_iargs = op->calli;
1031             nb_cargs = def->nb_cargs;
1032 
1033             /* function name, flags, out args */
1034             qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
1035                      tcg_find_helper(s, args[nb_oargs + nb_iargs]),
1036                      args[nb_oargs + nb_iargs + 1], nb_oargs);
1037             for (i = 0; i < nb_oargs; i++) {
1038                 qemu_log(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1039                                                    args[i]));
1040             }
1041             for (i = 0; i < nb_iargs; i++) {
1042                 TCGArg arg = args[nb_oargs + i];
1043                 const char *t = "<dummy>";
1044                 if (arg != TCG_CALL_DUMMY_ARG) {
1045                     t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg);
1046                 }
1047                 qemu_log(",%s", t);
1048             }
1049         } else {
1050             qemu_log(" %s ", def->name);
1051 
1052             nb_oargs = def->nb_oargs;
1053             nb_iargs = def->nb_iargs;
1054             nb_cargs = def->nb_cargs;
1055 
1056             k = 0;
1057             for (i = 0; i < nb_oargs; i++) {
1058                 if (k != 0) {
1059                     qemu_log(",");
1060                 }
1061                 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1062                                                    args[k++]));
1063             }
1064             for (i = 0; i < nb_iargs; i++) {
1065                 if (k != 0) {
1066                     qemu_log(",");
1067                 }
1068                 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1069                                                    args[k++]));
1070             }
1071             switch (c) {
1072             case INDEX_op_brcond_i32:
1073             case INDEX_op_setcond_i32:
1074             case INDEX_op_movcond_i32:
1075             case INDEX_op_brcond2_i32:
1076             case INDEX_op_setcond2_i32:
1077             case INDEX_op_brcond_i64:
1078             case INDEX_op_setcond_i64:
1079             case INDEX_op_movcond_i64:
1080                 if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
1081                     qemu_log(",%s", cond_name[args[k++]]);
1082                 } else {
1083                     qemu_log(",$0x%" TCG_PRIlx, args[k++]);
1084                 }
1085                 i = 1;
1086                 break;
1087             case INDEX_op_qemu_ld_i32:
1088             case INDEX_op_qemu_st_i32:
1089             case INDEX_op_qemu_ld_i64:
1090             case INDEX_op_qemu_st_i64:
1091                 {
1092                     TCGMemOpIdx oi = args[k++];
1093                     TCGMemOp op = get_memop(oi);
1094                     unsigned ix = get_mmuidx(oi);
1095 
1096                     if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
1097                         qemu_log(",$0x%x,%u", op, ix);
1098                     } else {
1099                         const char *s_al = "", *s_op;
1100                         if (op & MO_AMASK) {
1101                             if ((op & MO_AMASK) == MO_ALIGN) {
1102                                 s_al = "al+";
1103                             } else {
1104                                 s_al = "un+";
1105                             }
1106                         }
1107                         s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
1108                         qemu_log(",%s%s,%u", s_al, s_op, ix);
1109                     }
1110                     i = 1;
1111                 }
1112                 break;
1113             default:
1114                 i = 0;
1115                 break;
1116             }
1117             switch (c) {
1118             case INDEX_op_set_label:
1119             case INDEX_op_br:
1120             case INDEX_op_brcond_i32:
1121             case INDEX_op_brcond_i64:
1122             case INDEX_op_brcond2_i32:
1123                 qemu_log("%s$L%d", k ? "," : "", arg_label(args[k])->id);
1124                 i++, k++;
1125                 break;
1126             default:
1127                 break;
1128             }
1129             for (; i < nb_cargs; i++, k++) {
1130                 qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", args[k]);
1131             }
1132         }
1133         qemu_log("\n");
1134     }
1135 }
1136 
1137 /* We give more priority to constraints with fewer registers. */
1138 static int get_constraint_priority(const TCGOpDef *def, int k)
1139 {
1140     const TCGArgConstraint *arg_ct;
1141 
1142     int i, n;
1143     arg_ct = &def->args_ct[k];
1144     if (arg_ct->ct & TCG_CT_ALIAS) {
1145         /* an alias is equivalent to a single register */
1146         n = 1;
1147     } else {
1148         if (!(arg_ct->ct & TCG_CT_REG))
1149             return 0;
1150         n = 0;
1151         for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1152             if (tcg_regset_test_reg(arg_ct->u.regs, i))
1153                 n++;
1154         }
1155     }
1156     return TCG_TARGET_NB_REGS - n + 1;
1157 }
1158 
1159 /* sort from highest priority to lowest */
1160 static void sort_constraints(TCGOpDef *def, int start, int n)
1161 {
1162     int i, j, p1, p2, tmp;
1163 
1164     for(i = 0; i < n; i++)
1165         def->sorted_args[start + i] = start + i;
1166     if (n <= 1)
1167         return;
1168     for(i = 0; i < n - 1; i++) {
1169         for(j = i + 1; j < n; j++) {
1170             p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1171             p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1172             if (p1 < p2) {
1173                 tmp = def->sorted_args[start + i];
1174                 def->sorted_args[start + i] = def->sorted_args[start + j];
1175                 def->sorted_args[start + j] = tmp;
1176             }
1177         }
1178     }
1179 }
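/* Worked example for the two routines above, assuming 16 target registers:
   a constraint accepting a single register gets priority 16 - 1 + 1 = 16,
   one accepting any of the 16 registers gets 16 - 16 + 1 = 1, and an alias
   counts as a single register (priority 16), so the most constrained
   arguments are considered first by the register allocator.  */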
1180 
1181 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
1182 {
1183     TCGOpcode op;
1184     TCGOpDef *def;
1185     const char *ct_str;
1186     int i, nb_args;
1187 
1188     for(;;) {
1189         if (tdefs->op == (TCGOpcode)-1)
1190             break;
1191         op = tdefs->op;
1192         assert((unsigned)op < NB_OPS);
1193         def = &tcg_op_defs[op];
1194 #if defined(CONFIG_DEBUG_TCG)
1195         /* Duplicate entry in op definitions? */
1196         assert(!def->used);
1197         def->used = 1;
1198 #endif
1199         nb_args = def->nb_iargs + def->nb_oargs;
1200         for(i = 0; i < nb_args; i++) {
1201             ct_str = tdefs->args_ct_str[i];
1202             /* Incomplete TCGTargetOpDef entry? */
1203             assert(ct_str != NULL);
1204             tcg_regset_clear(def->args_ct[i].u.regs);
1205             def->args_ct[i].ct = 0;
1206             if (ct_str[0] >= '0' && ct_str[0] <= '9') {
1207                 int oarg;
1208                 oarg = ct_str[0] - '0';
1209                 assert(oarg < def->nb_oargs);
1210                 assert(def->args_ct[oarg].ct & TCG_CT_REG);
1211                 /* TCG_CT_ALIAS is for the output arguments. The input
1212                    argument is tagged with TCG_CT_IALIAS. */
1213                 def->args_ct[i] = def->args_ct[oarg];
1214                 def->args_ct[oarg].ct = TCG_CT_ALIAS;
1215                 def->args_ct[oarg].alias_index = i;
1216                 def->args_ct[i].ct |= TCG_CT_IALIAS;
1217                 def->args_ct[i].alias_index = oarg;
1218             } else {
1219                 for(;;) {
1220                     if (*ct_str == '\0')
1221                         break;
1222                     switch(*ct_str) {
1223                     case 'i':
1224                         def->args_ct[i].ct |= TCG_CT_CONST;
1225                         ct_str++;
1226                         break;
1227                     default:
1228                         if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
1229                             fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1230                                     ct_str, i, def->name);
1231                             exit(1);
1232                         }
1233                     }
1234                 }
1235             }
1236         }
1237 
1238         /* TCGTargetOpDef entry with too much information? */
1239         assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
1240 
1241         /* sort the constraints (XXX: this is just a heuristic) */
1242         sort_constraints(def, 0, def->nb_oargs);
1243         sort_constraints(def, def->nb_oargs, def->nb_iargs);
1244 
1245 #if 0
1246         {
1247             int i;
1248 
1249             printf("%s: sorted=", def->name);
1250             for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
1251                 printf(" %d", def->sorted_args[i]);
1252             printf("\n");
1253         }
1254 #endif
1255         tdefs++;
1256     }
1257 
1258 #if defined(CONFIG_DEBUG_TCG)
1259     i = 0;
1260     for (op = 0; op < tcg_op_defs_max; op++) {
1261         const TCGOpDef *def = &tcg_op_defs[op];
1262         if (def->flags & TCG_OPF_NOT_PRESENT) {
1263             /* Wrong entry in op definitions? */
1264             if (def->used) {
1265                 fprintf(stderr, "Invalid op definition for %s\n", def->name);
1266                 i = 1;
1267             }
1268         } else {
1269             /* Missing entry in op definitions? */
1270             if (!def->used) {
1271                 fprintf(stderr, "Missing op definition for %s\n", def->name);
1272                 i = 1;
1273             }
1274         }
1275     }
1276     if (i == 1) {
1277         tcg_abort();
1278     }
1279 #endif
1280 }
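/* Illustrative shape of the table this function consumes: a backend passes
   an array of TCGTargetOpDef entries such as

       { INDEX_op_add_i32, { "r", "0", "ri" } },

   terminated by an entry whose op is (TCGOpcode)-1.  "i" is handled above
   as TCG_CT_CONST, a digit such as "0" makes that input an alias of the
   numbered output (the TCG_CT_ALIAS/TCG_CT_IALIAS handling above), and
   letters like "r" are left to target_parse_constraint().  The concrete
   entry shown is only an example; the real tables are per-backend.  */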
1281 
1282 void tcg_op_remove(TCGContext *s, TCGOp *op)
1283 {
1284     int next = op->next;
1285     int prev = op->prev;
1286 
1287     if (next >= 0) {
1288         s->gen_op_buf[next].prev = prev;
1289     } else {
1290         s->gen_last_op_idx = prev;
1291     }
1292     if (prev >= 0) {
1293         s->gen_op_buf[prev].next = next;
1294     } else {
1295         s->gen_first_op_idx = next;
1296     }
1297 
1298     memset(op, -1, sizeof(*op));
1299 
1300 #ifdef CONFIG_PROFILER
1301     s->del_op_count++;
1302 #endif
1303 }
1304 
1305 #ifdef USE_LIVENESS_ANALYSIS
1306 /* liveness analysis: end of function: all temps are dead, and globals
1307    should be in memory. */
1308 static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps,
1309                                    uint8_t *mem_temps)
1310 {
1311     memset(dead_temps, 1, s->nb_temps);
1312     memset(mem_temps, 1, s->nb_globals);
1313     memset(mem_temps + s->nb_globals, 0, s->nb_temps - s->nb_globals);
1314 }
1315 
1316 /* liveness analysis: end of basic block: all temps are dead, globals
1317    and local temps should be in memory. */
1318 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps,
1319                                  uint8_t *mem_temps)
1320 {
1321     int i;
1322 
1323     memset(dead_temps, 1, s->nb_temps);
1324     memset(mem_temps, 1, s->nb_globals);
1325     for(i = s->nb_globals; i < s->nb_temps; i++) {
1326         mem_temps[i] = s->temps[i].temp_local;
1327     }
1328 }
1329 
1330 /* Liveness analysis: update the op_dead_args array to tell whether a
1331    given input argument is dead. Instructions updating dead
1332    temporaries are removed. */
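/* Encoding of the results computed below: bit i of op_dead_args[oi] is set
   when argument i of op oi (outputs first, then inputs) dies at that op,
   and bit i of op_sync_args[oi] when output i must additionally be synced
   back to memory.  Worked example: for "add_i32 t0, t1, t2" where neither
   t1 nor t2 is used afterwards, bits 1 and 2 of the dead mask are set.  */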
1333 static void tcg_liveness_analysis(TCGContext *s)
1334 {
1335     uint8_t *dead_temps, *mem_temps;
1336     int oi, oi_prev, nb_ops;
1337 
1338     nb_ops = s->gen_next_op_idx;
1339     s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1340     s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1341 
1342     dead_temps = tcg_malloc(s->nb_temps);
1343     mem_temps = tcg_malloc(s->nb_temps);
1344     tcg_la_func_end(s, dead_temps, mem_temps);
1345 
1346     for (oi = s->gen_last_op_idx; oi >= 0; oi = oi_prev) {
1347         int i, nb_iargs, nb_oargs;
1348         TCGOpcode opc_new, opc_new2;
1349         bool have_opc_new2;
1350         uint16_t dead_args;
1351         uint8_t sync_args;
1352         TCGArg arg;
1353 
1354         TCGOp * const op = &s->gen_op_buf[oi];
1355         TCGArg * const args = &s->gen_opparam_buf[op->args];
1356         TCGOpcode opc = op->opc;
1357         const TCGOpDef *def = &tcg_op_defs[opc];
1358 
1359         oi_prev = op->prev;
1360 
1361         switch (opc) {
1362         case INDEX_op_call:
1363             {
1364                 int call_flags;
1365 
1366                 nb_oargs = op->callo;
1367                 nb_iargs = op->calli;
1368                 call_flags = args[nb_oargs + nb_iargs + 1];
1369 
1370                 /* pure functions can be removed if their result is unused */
1371                 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
1372                     for (i = 0; i < nb_oargs; i++) {
1373                         arg = args[i];
1374                         if (!dead_temps[arg] || mem_temps[arg]) {
1375                             goto do_not_remove_call;
1376                         }
1377                     }
1378                     goto do_remove;
1379                 } else {
1380                 do_not_remove_call:
1381 
1382                     /* output args are dead */
1383                     dead_args = 0;
1384                     sync_args = 0;
1385                     for (i = 0; i < nb_oargs; i++) {
1386                         arg = args[i];
1387                         if (dead_temps[arg]) {
1388                             dead_args |= (1 << i);
1389                         }
1390                         if (mem_temps[arg]) {
1391                             sync_args |= (1 << i);
1392                         }
1393                         dead_temps[arg] = 1;
1394                         mem_temps[arg] = 0;
1395                     }
1396 
1397                     if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
1398                         /* globals should be synced to memory */
1399                         memset(mem_temps, 1, s->nb_globals);
1400                     }
1401                     if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
1402                                         TCG_CALL_NO_READ_GLOBALS))) {
1403                         /* globals should go back to memory */
1404                         memset(dead_temps, 1, s->nb_globals);
1405                     }
1406 
1407                     /* record arguments that die in this helper */
1408                     for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1409                         arg = args[i];
1410                         if (arg != TCG_CALL_DUMMY_ARG) {
1411                             if (dead_temps[arg]) {
1412                                 dead_args |= (1 << i);
1413                             }
1414                         }
1415                     }
1416                     /* input arguments are live for preceding opcodes */
1417                     for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1418                         arg = args[i];
1419                         dead_temps[arg] = 0;
1420                     }
1421                     s->op_dead_args[oi] = dead_args;
1422                     s->op_sync_args[oi] = sync_args;
1423                 }
1424             }
1425             break;
1426         case INDEX_op_insn_start:
1427             break;
1428         case INDEX_op_discard:
1429             /* mark the temporary as dead */
1430             dead_temps[args[0]] = 1;
1431             mem_temps[args[0]] = 0;
1432             break;
1433 
1434         case INDEX_op_add2_i32:
1435             opc_new = INDEX_op_add_i32;
1436             goto do_addsub2;
1437         case INDEX_op_sub2_i32:
1438             opc_new = INDEX_op_sub_i32;
1439             goto do_addsub2;
1440         case INDEX_op_add2_i64:
1441             opc_new = INDEX_op_add_i64;
1442             goto do_addsub2;
1443         case INDEX_op_sub2_i64:
1444             opc_new = INDEX_op_sub_i64;
1445         do_addsub2:
1446             nb_iargs = 4;
1447             nb_oargs = 2;
1448             /* Test if the high part of the operation is dead, but not
1449                the low part.  The result can be optimized to a simple
1450                add or sub.  This happens often for an x86_64 guest when
1451                the cpu is running in 32-bit mode.  */
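            /* Worked example: the add2/sub2 argument layout is
               (out_lo, out_hi, a_lo, a_hi, b_lo, b_hi).  When only out_hi
               is dead, the rewrite below produces "add_i32 out_lo, a_lo,
               b_lo" by moving a_lo into args[1] and b_lo into args[2].  */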
1452             if (dead_temps[args[1]] && !mem_temps[args[1]]) {
1453                 if (dead_temps[args[0]] && !mem_temps[args[0]]) {
1454                     goto do_remove;
1455                 }
1456                 /* Replace the opcode and adjust the args in place,
1457                    leaving 3 unused args at the end.  */
1458                 op->opc = opc = opc_new;
1459                 args[1] = args[2];
1460                 args[2] = args[4];
1461                 /* Fall through and mark the single-word operation live.  */
1462                 nb_iargs = 2;
1463                 nb_oargs = 1;
1464             }
1465             goto do_not_remove;
1466 
1467         case INDEX_op_mulu2_i32:
1468             opc_new = INDEX_op_mul_i32;
1469             opc_new2 = INDEX_op_muluh_i32;
1470             have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
1471             goto do_mul2;
1472         case INDEX_op_muls2_i32:
1473             opc_new = INDEX_op_mul_i32;
1474             opc_new2 = INDEX_op_mulsh_i32;
1475             have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
1476             goto do_mul2;
1477         case INDEX_op_mulu2_i64:
1478             opc_new = INDEX_op_mul_i64;
1479             opc_new2 = INDEX_op_muluh_i64;
1480             have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
1481             goto do_mul2;
1482         case INDEX_op_muls2_i64:
1483             opc_new = INDEX_op_mul_i64;
1484             opc_new2 = INDEX_op_mulsh_i64;
1485             have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
1486             goto do_mul2;
1487         do_mul2:
1488             nb_iargs = 2;
1489             nb_oargs = 2;
1490             if (dead_temps[args[1]] && !mem_temps[args[1]]) {
1491                 if (dead_temps[args[0]] && !mem_temps[args[0]]) {
1492                     /* Both parts of the operation are dead.  */
1493                     goto do_remove;
1494                 }
1495                 /* The high part of the operation is dead; generate the low. */
1496                 op->opc = opc = opc_new;
1497                 args[1] = args[2];
1498                 args[2] = args[3];
1499             } else if (have_opc_new2 && dead_temps[args[0]]
1500                        && !mem_temps[args[0]]) {
1501                 /* The low part of the operation is dead; generate the high. */
1502                 op->opc = opc = opc_new2;
1503                 args[0] = args[1];
1504                 args[1] = args[2];
1505                 args[2] = args[3];
1506             } else {
1507                 goto do_not_remove;
1508             }
1509             /* Mark the single-word operation live.  */
1510             nb_oargs = 1;
1511             goto do_not_remove;
1512 
1513         default:
1514             /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1515             nb_iargs = def->nb_iargs;
1516             nb_oargs = def->nb_oargs;
1517 
1518             /* Test if the operation can be removed because all
1519                its outputs are dead.  We assume that nb_oargs == 0
1520                implies side effects.  */
1521             if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1522                 for (i = 0; i < nb_oargs; i++) {
1523                     arg = args[i];
1524                     if (!dead_temps[arg] || mem_temps[arg]) {
1525                         goto do_not_remove;
1526                     }
1527                 }
1528             do_remove:
1529                 tcg_op_remove(s, op);
1530             } else {
1531             do_not_remove:
1532                 /* output args are dead */
1533                 dead_args = 0;
1534                 sync_args = 0;
1535                 for (i = 0; i < nb_oargs; i++) {
1536                     arg = args[i];
1537                     if (dead_temps[arg]) {
1538                         dead_args |= (1 << i);
1539                     }
1540                     if (mem_temps[arg]) {
1541                         sync_args |= (1 << i);
1542                     }
1543                     dead_temps[arg] = 1;
1544                     mem_temps[arg] = 0;
1545                 }
1546 
1547                 /* if end of basic block, update */
1548                 if (def->flags & TCG_OPF_BB_END) {
1549                     tcg_la_bb_end(s, dead_temps, mem_temps);
1550                 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
1551                     /* globals should be synced to memory */
1552                     memset(mem_temps, 1, s->nb_globals);
1553                 }
1554 
1555                 /* record arguments that die in this opcode */
1556                 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1557                     arg = args[i];
1558                     if (dead_temps[arg]) {
1559                         dead_args |= (1 << i);
1560                     }
1561                 }
1562                 /* input arguments are live for preceding opcodes */
1563                 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1564                     arg = args[i];
1565                     dead_temps[arg] = 0;
1566                 }
1567                 s->op_dead_args[oi] = dead_args;
1568                 s->op_sync_args[oi] = sync_args;
1569             }
1570             break;
1571         }
1572     }
1573 }
1574 #else
1575 /* dummy liveness analysis */
1576 static void tcg_liveness_analysis(TCGContext *s)
1577 {
1578     int nb_ops;
1579     nb_ops = s->gen_opc_ptr - s->gen_opc_buf;
1580 
1581     s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1582     memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t));
1583     s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1584     memset(s->op_sync_args, 0, nb_ops * sizeof(uint8_t));
1585 }
1586 #endif
1587 
1588 #ifndef NDEBUG
1589 static void dump_regs(TCGContext *s)
1590 {
1591     TCGTemp *ts;
1592     int i;
1593     char buf[64];
1594 
1595     for(i = 0; i < s->nb_temps; i++) {
1596         ts = &s->temps[i];
1597         printf("  %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1598         switch(ts->val_type) {
1599         case TEMP_VAL_REG:
1600             printf("%s", tcg_target_reg_names[ts->reg]);
1601             break;
1602         case TEMP_VAL_MEM:
1603             printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
1604             break;
1605         case TEMP_VAL_CONST:
1606             printf("$0x%" TCG_PRIlx, ts->val);
1607             break;
1608         case TEMP_VAL_DEAD:
1609             printf("D");
1610             break;
1611         default:
1612             printf("???");
1613             break;
1614         }
1615         printf("\n");
1616     }
1617 
1618     for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1619         if (s->reg_to_temp[i] >= 0) {
1620             printf("%s: %s\n",
1621                    tcg_target_reg_names[i],
1622                    tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
1623         }
1624     }
1625 }
1626 
1627 static void check_regs(TCGContext *s)
1628 {
1629     int reg, k;
1630     TCGTemp *ts;
1631     char buf[64];
1632 
1633     for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1634         k = s->reg_to_temp[reg];
1635         if (k >= 0) {
1636             ts = &s->temps[k];
1637             if (ts->val_type != TEMP_VAL_REG ||
1638                 ts->reg != reg) {
1639                 printf("Inconsistency for register %s:\n",
1640                        tcg_target_reg_names[reg]);
1641                 goto fail;
1642             }
1643         }
1644     }
1645     for(k = 0; k < s->nb_temps; k++) {
1646         ts = &s->temps[k];
1647         if (ts->val_type == TEMP_VAL_REG &&
1648             !ts->fixed_reg &&
1649             s->reg_to_temp[ts->reg] != k) {
1650                 printf("Inconsistency for temp %s:\n",
1651                        tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
1652         fail:
1653                 printf("reg state:\n");
1654                 dump_regs(s);
1655                 tcg_abort();
1656         }
1657     }
1658 }
1659 #endif
1660 
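     /* Allocate a slot in the TCG stack frame for temporary 'temp'.  This is
        done lazily, the first time the temp actually has to be spilled. */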
1661 static void temp_allocate_frame(TCGContext *s, int temp)
1662 {
1663     TCGTemp *ts;
1664     ts = &s->temps[temp];
1665 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
1666     /* Sparc64 stack is accessed with offset of 2047 */
1667     s->current_frame_offset = (s->current_frame_offset +
1668                                (tcg_target_long)sizeof(tcg_target_long) - 1) &
1669         ~(sizeof(tcg_target_long) - 1);
1670 #endif
1671     if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
1672         s->frame_end) {
1673         tcg_abort();
1674     }
1675     ts->mem_offset = s->current_frame_offset;
1676     ts->mem_reg = s->frame_reg;
1677     ts->mem_allocated = 1;
1678     s->current_frame_offset += sizeof(tcg_target_long);
1679 }
1680 
1681 /* sync register 'reg' by saving it to the corresponding temporary */
1682 static inline void tcg_reg_sync(TCGContext *s, int reg)
1683 {
1684     TCGTemp *ts;
1685     int temp;
1686 
1687     temp = s->reg_to_temp[reg];
1688     ts = &s->temps[temp];
1689     assert(ts->val_type == TEMP_VAL_REG);
1690     if (!ts->mem_coherent && !ts->fixed_reg) {
1691         if (!ts->mem_allocated) {
1692             temp_allocate_frame(s, temp);
1693         }
1694         tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1695     }
1696     ts->mem_coherent = 1;
1697 }
1698 
1699 /* free register 'reg' by spilling the corresponding temporary if necessary */
1700 static void tcg_reg_free(TCGContext *s, int reg)
1701 {
1702     int temp;
1703 
1704     temp = s->reg_to_temp[reg];
1705     if (temp != -1) {
1706         tcg_reg_sync(s, reg);
1707         s->temps[temp].val_type = TEMP_VAL_MEM;
1708         s->reg_to_temp[reg] = -1;
1709     }
1710 }
1711 
1712 /* Allocate a register belonging to reg1 & ~reg2 */
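     /* Free registers are preferred, scanned in tcg_target_reg_alloc_order;
        failing that, the first matching occupied register is spilled via
        tcg_reg_free.  A typical call, mirroring the uses further down:

            reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                allocated_regs);
     */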
1713 static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
1714 {
1715     int i, reg;
1716     TCGRegSet reg_ct;
1717 
1718     tcg_regset_andnot(reg_ct, reg1, reg2);
1719 
1720     /* first try free registers */
1721     for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1722         reg = tcg_target_reg_alloc_order[i];
1723         if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
1724             return reg;
1725     }
1726 
1727     /* XXX: do better spill choice */
1728     for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1729         reg = tcg_target_reg_alloc_order[i];
1730         if (tcg_regset_test_reg(reg_ct, reg)) {
1731             tcg_reg_free(s, reg);
1732             return reg;
1733         }
1734     }
1735 
1736     tcg_abort();
1737 }
1738 
1739 /* mark a temporary as dead. */
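     /* Globals and local temps revert to TEMP_VAL_MEM (memory is their
        canonical home), while plain temps become TEMP_VAL_DEAD since they
        carry no value across this point. */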
1740 static inline void temp_dead(TCGContext *s, int temp)
1741 {
1742     TCGTemp *ts;
1743 
1744     ts = &s->temps[temp];
1745     if (!ts->fixed_reg) {
1746         if (ts->val_type == TEMP_VAL_REG) {
1747             s->reg_to_temp[ts->reg] = -1;
1748         }
1749         if (temp < s->nb_globals || ts->temp_local) {
1750             ts->val_type = TEMP_VAL_MEM;
1751         } else {
1752             ts->val_type = TEMP_VAL_DEAD;
1753         }
1754     }
1755 }
1756 
1757 /* sync a temporary to memory. 'allocated_regs' is used in case a
1758    temporary register needs to be allocated to store a constant. */
1759 static inline void temp_sync(TCGContext *s, int temp, TCGRegSet allocated_regs)
1760 {
1761     TCGTemp *ts;
1762 
1763     ts = &s->temps[temp];
1764     if (!ts->fixed_reg) {
1765         switch(ts->val_type) {
1766         case TEMP_VAL_CONST:
1767             ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1768                                     allocated_regs);
1769             ts->val_type = TEMP_VAL_REG;
1770             s->reg_to_temp[ts->reg] = temp;
1771             ts->mem_coherent = 0;
1772             tcg_out_movi(s, ts->type, ts->reg, ts->val);
1773             /* fall through */
1774         case TEMP_VAL_REG:
1775             tcg_reg_sync(s, ts->reg);
1776             break;
1777         case TEMP_VAL_DEAD:
1778         case TEMP_VAL_MEM:
1779             break;
1780         default:
1781             tcg_abort();
1782         }
1783     }
1784 }
1785 
1786 /* save a temporary to memory. 'allocated_regs' is used in case a
1787    temporary register needs to be allocated to store a constant. */
1788 static inline void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
1789 {
1790 #ifdef USE_LIVENESS_ANALYSIS
1791     /* The liveness analysis already ensures that globals are back
1792        in memory. Keep an assert for safety. */
1793     assert(s->temps[temp].val_type == TEMP_VAL_MEM || s->temps[temp].fixed_reg);
1794 #else
1795     temp_sync(s, temp, allocated_regs);
1796     temp_dead(s, temp);
1797 #endif
1798 }
1799 
1800 /* save globals to their canonical location and assume they can be
1801    modified by the following code. 'allocated_regs' is used in case a
1802    temporary register needs to be allocated to store a constant. */
1803 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
1804 {
1805     int i;
1806 
1807     for(i = 0; i < s->nb_globals; i++) {
1808         temp_save(s, i, allocated_regs);
1809     }
1810 }
1811 
1812 /* sync globals to their canonical location and assume they can be
1813    read by the following code. 'allocated_regs' is used in case a
1814    temporary register needs to be allocated to store a constant. */
1815 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
1816 {
1817     int i;
1818 
1819     for (i = 0; i < s->nb_globals; i++) {
1820 #ifdef USE_LIVENESS_ANALYSIS
1821         assert(s->temps[i].val_type != TEMP_VAL_REG || s->temps[i].fixed_reg ||
1822                s->temps[i].mem_coherent);
1823 #else
1824         temp_sync(s, i, allocated_regs);
1825 #endif
1826     }
1827 }
1828 
1829 /* at the end of a basic block, we assume all temporaries are dead and
1830    all globals are stored at their canonical location. */
1831 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
1832 {
1833     TCGTemp *ts;
1834     int i;
1835 
1836     for(i = s->nb_globals; i < s->nb_temps; i++) {
1837         ts = &s->temps[i];
1838         if (ts->temp_local) {
1839             temp_save(s, i, allocated_regs);
1840         } else {
1841 #ifdef USE_LIVENESS_ANALYSIS
1842             /* The liveness analysis already ensures that temps are dead.
1843                Keep an assert for safety. */
1844             assert(ts->val_type == TEMP_VAL_DEAD);
1845 #else
1846             temp_dead(s, i);
1847 #endif
1848         }
1849     }
1850 
1851     save_globals(s, allocated_regs);
1852 }
1853 
1854 #define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
1855 #define NEED_SYNC_ARG(n) ((sync_args >> (n)) & 1)
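     /* 'dead_args' and 'sync_args' come from the liveness pass above: bit n
        refers to argument n of the op, outputs first and inputs after them.
        E.g. for a three-operand op "d, a, b" (one output), IS_DEAD_ARG(1)
        asks whether input 'a' dies here, and NEED_SYNC_ARG(0) whether output
        'd' must be written back to its memory slot. */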
1856 
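     /* Specialised allocation for movi: unless the destination is a fixed
        register, the constant is only recorded (TEMP_VAL_CONST) and the host
        movi is deferred until a register or memory copy is really needed. */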
1857 static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
1858                                uint16_t dead_args, uint8_t sync_args)
1859 {
1860     TCGTemp *ots;
1861     tcg_target_ulong val;
1862 
1863     ots = &s->temps[args[0]];
1864     val = args[1];
1865 
1866     if (ots->fixed_reg) {
1867         /* for fixed registers, we do not do any constant
1868            propagation */
1869         tcg_out_movi(s, ots->type, ots->reg, val);
1870     } else {
1871         /* The movi is not explicitly generated here */
1872         if (ots->val_type == TEMP_VAL_REG)
1873             s->reg_to_temp[ots->reg] = -1;
1874         ots->val_type = TEMP_VAL_CONST;
1875         ots->val = val;
1876     }
1877     if (NEED_SYNC_ARG(0)) {
1878         temp_sync(s, args[0], s->reserved_regs);
1879     }
1880     if (IS_DEAD_ARG(0)) {
1881         temp_dead(s, args[0]);
1882     }
1883 }
1884 
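     /* Register allocation for mov: propagate constants, reuse the source
        register when the input dies, store straight to memory when the output
        is dead and only needs a sync, and fall back to an actual host move
        otherwise. */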
1885 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
1886                               const TCGArg *args, uint16_t dead_args,
1887                               uint8_t sync_args)
1888 {
1889     TCGRegSet allocated_regs;
1890     TCGTemp *ts, *ots;
1891     TCGType otype, itype;
1892 
1893     tcg_regset_set(allocated_regs, s->reserved_regs);
1894     ots = &s->temps[args[0]];
1895     ts = &s->temps[args[1]];
1896 
1897     /* Note that otype != itype for no-op truncation.  */
1898     otype = ots->type;
1899     itype = ts->type;
1900 
1901     /* If the source value is not in a register, and we're going to be
1902        forced to have it in a register in order to perform the copy,
1903        then copy the SOURCE value into its own register first.  That way
1904        we don't have to reload SOURCE the next time it is used. */
1905     if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
1906         || ts->val_type == TEMP_VAL_MEM) {
1907         ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[itype],
1908                                 allocated_regs);
1909         if (ts->val_type == TEMP_VAL_MEM) {
1910             tcg_out_ld(s, itype, ts->reg, ts->mem_reg, ts->mem_offset);
1911             ts->mem_coherent = 1;
1912         } else if (ts->val_type == TEMP_VAL_CONST) {
1913             tcg_out_movi(s, itype, ts->reg, ts->val);
1914             ts->mem_coherent = 0;
1915         }
1916         s->reg_to_temp[ts->reg] = args[1];
1917         ts->val_type = TEMP_VAL_REG;
1918     }
1919 
1920     if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
1921         /* mov to a non-saved dead register makes no sense (even with
1922            liveness analysis disabled). */
1923         assert(NEED_SYNC_ARG(0));
1924         /* The code above should have moved the temp to a register. */
1925         assert(ts->val_type == TEMP_VAL_REG);
1926         if (!ots->mem_allocated) {
1927             temp_allocate_frame(s, args[0]);
1928         }
1929         tcg_out_st(s, otype, ts->reg, ots->mem_reg, ots->mem_offset);
1930         if (IS_DEAD_ARG(1)) {
1931             temp_dead(s, args[1]);
1932         }
1933         temp_dead(s, args[0]);
1934     } else if (ts->val_type == TEMP_VAL_CONST) {
1935         /* propagate constant */
1936         if (ots->val_type == TEMP_VAL_REG) {
1937             s->reg_to_temp[ots->reg] = -1;
1938         }
1939         ots->val_type = TEMP_VAL_CONST;
1940         ots->val = ts->val;
1941         if (IS_DEAD_ARG(1)) {
1942             temp_dead(s, args[1]);
1943         }
1944     } else {
1945         /* The code in the first if block should have moved the
1946            temp to a register. */
1947         assert(ts->val_type == TEMP_VAL_REG);
1948         if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
1949             /* the mov can be suppressed */
1950             if (ots->val_type == TEMP_VAL_REG) {
1951                 s->reg_to_temp[ots->reg] = -1;
1952             }
1953             ots->reg = ts->reg;
1954             temp_dead(s, args[1]);
1955         } else {
1956             if (ots->val_type != TEMP_VAL_REG) {
1957                 /* When allocating a new register, make sure to not spill the
1958                    input one. */
1959                 tcg_regset_set_reg(allocated_regs, ts->reg);
1960                 ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
1961                                          allocated_regs);
1962             }
1963             tcg_out_mov(s, otype, ots->reg, ts->reg);
1964         }
1965         ots->val_type = TEMP_VAL_REG;
1966         ots->mem_coherent = 0;
1967         s->reg_to_temp[ots->reg] = args[0];
1968         if (NEED_SYNC_ARG(0)) {
1969             tcg_reg_sync(s, ots->reg);
1970         }
1971     }
1972 }
1973 
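     /* Generic allocation for one op: bring the inputs into registers (or
        accept constants) so that the constraints are satisfied, deal with
        basic-block ends, call-clobbered registers and side effects, allocate
        the outputs, emit the op, then sync and/or free arguments as requested
        by dead_args/sync_args. */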
1974 static void tcg_reg_alloc_op(TCGContext *s,
1975                              const TCGOpDef *def, TCGOpcode opc,
1976                              const TCGArg *args, uint16_t dead_args,
1977                              uint8_t sync_args)
1978 {
1979     TCGRegSet allocated_regs;
1980     int i, k, nb_iargs, nb_oargs, reg;
1981     TCGArg arg;
1982     const TCGArgConstraint *arg_ct;
1983     TCGTemp *ts;
1984     TCGArg new_args[TCG_MAX_OP_ARGS];
1985     int const_args[TCG_MAX_OP_ARGS];
1986 
1987     nb_oargs = def->nb_oargs;
1988     nb_iargs = def->nb_iargs;
1989 
1990     /* copy constants */
1991     memcpy(new_args + nb_oargs + nb_iargs,
1992            args + nb_oargs + nb_iargs,
1993            sizeof(TCGArg) * def->nb_cargs);
1994 
1995     /* satisfy input constraints */
1996     tcg_regset_set(allocated_regs, s->reserved_regs);
1997     for(k = 0; k < nb_iargs; k++) {
1998         i = def->sorted_args[nb_oargs + k];
1999         arg = args[i];
2000         arg_ct = &def->args_ct[i];
2001         ts = &s->temps[arg];
2002         if (ts->val_type == TEMP_VAL_MEM) {
2003             reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2004             tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2005             ts->val_type = TEMP_VAL_REG;
2006             ts->reg = reg;
2007             ts->mem_coherent = 1;
2008             s->reg_to_temp[reg] = arg;
2009         } else if (ts->val_type == TEMP_VAL_CONST) {
2010             if (tcg_target_const_match(ts->val, ts->type, arg_ct)) {
2011                 /* constant is OK for instruction */
2012                 const_args[i] = 1;
2013                 new_args[i] = ts->val;
2014                 goto iarg_end;
2015             } else {
2016                 /* need to move to a register */
2017                 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2018                 tcg_out_movi(s, ts->type, reg, ts->val);
2019                 ts->val_type = TEMP_VAL_REG;
2020                 ts->reg = reg;
2021                 ts->mem_coherent = 0;
2022                 s->reg_to_temp[reg] = arg;
2023             }
2024         }
2025         assert(ts->val_type == TEMP_VAL_REG);
2026         if (arg_ct->ct & TCG_CT_IALIAS) {
2027             if (ts->fixed_reg) {
2028                 /* if fixed register, we must allocate a new register
2029                    if the alias is not the same register */
2030                 if (arg != args[arg_ct->alias_index])
2031                     goto allocate_in_reg;
2032             } else {
2033                 /* if the input is aliased to an output and if it is
2034                    not dead after the instruction, we must allocate
2035                    a new register and move it */
2036                 if (!IS_DEAD_ARG(i)) {
2037                     goto allocate_in_reg;
2038                 }
2039                 /* check if the current register has already been allocated
2040                    for another input aliased to an output */
2041                 int k2, i2;
2042                 for (k2 = 0 ; k2 < k ; k2++) {
2043                     i2 = def->sorted_args[nb_oargs + k2];
2044                     if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
2045                         (new_args[i2] == ts->reg)) {
2046                         goto allocate_in_reg;
2047                     }
2048                 }
2049             }
2050         }
2051         reg = ts->reg;
2052         if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2053             /* nothing to do : the constraint is satisfied */
2054         } else {
2055         allocate_in_reg:
2056             /* allocate a new register matching the constraint
2057                and move the temporary register into it */
2058             reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2059             tcg_out_mov(s, ts->type, reg, ts->reg);
2060         }
2061         new_args[i] = reg;
2062         const_args[i] = 0;
2063         tcg_regset_set_reg(allocated_regs, reg);
2064     iarg_end: ;
2065     }
2066 
2067     /* mark dead temporaries and free the associated registers */
2068     for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2069         if (IS_DEAD_ARG(i)) {
2070             temp_dead(s, args[i]);
2071         }
2072     }
2073 
2074     if (def->flags & TCG_OPF_BB_END) {
2075         tcg_reg_alloc_bb_end(s, allocated_regs);
2076     } else {
2077         if (def->flags & TCG_OPF_CALL_CLOBBER) {
2078             /* XXX: permit generic clobber register list ? */
2079             for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2080                 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
2081                     tcg_reg_free(s, reg);
2082                 }
2083             }
2084         }
2085         if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2086             /* sync globals if the op has side effects and might trigger
2087                an exception. */
2088             sync_globals(s, allocated_regs);
2089         }
2090 
2091         /* satisfy the output constraints */
2092         tcg_regset_set(allocated_regs, s->reserved_regs);
2093         for(k = 0; k < nb_oargs; k++) {
2094             i = def->sorted_args[k];
2095             arg = args[i];
2096             arg_ct = &def->args_ct[i];
2097             ts = &s->temps[arg];
2098             if (arg_ct->ct & TCG_CT_ALIAS) {
2099                 reg = new_args[arg_ct->alias_index];
2100             } else {
2101                 /* if fixed register, we try to use it */
2102                 reg = ts->reg;
2103                 if (ts->fixed_reg &&
2104                     tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2105                     goto oarg_end;
2106                 }
2107                 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2108             }
2109             tcg_regset_set_reg(allocated_regs, reg);
2110             /* if a fixed register is used, then a move will be done afterwards */
2111             if (!ts->fixed_reg) {
2112                 if (ts->val_type == TEMP_VAL_REG) {
2113                     s->reg_to_temp[ts->reg] = -1;
2114                 }
2115                 ts->val_type = TEMP_VAL_REG;
2116                 ts->reg = reg;
2117                 /* temp value is modified, so the value kept in memory is
2118                    potentially not the same */
2119                 ts->mem_coherent = 0;
2120                 s->reg_to_temp[reg] = arg;
2121             }
2122         oarg_end:
2123             new_args[i] = reg;
2124         }
2125     }
2126 
2127     /* emit instruction */
2128     tcg_out_op(s, opc, new_args, const_args);
2129 
2130     /* move the outputs in the correct register if needed */
2131     for(i = 0; i < nb_oargs; i++) {
2132         ts = &s->temps[args[i]];
2133         reg = new_args[i];
2134         if (ts->fixed_reg && ts->reg != reg) {
2135             tcg_out_mov(s, ts->type, ts->reg, reg);
2136         }
2137         if (NEED_SYNC_ARG(i)) {
2138             tcg_reg_sync(s, reg);
2139         }
2140         if (IS_DEAD_ARG(i)) {
2141             temp_dead(s, args[i]);
2142         }
2143     }
2144 }
2145 
2146 #ifdef TCG_TARGET_STACK_GROWSUP
2147 #define STACK_DIR(x) (-(x))
2148 #else
2149 #define STACK_DIR(x) (x)
2150 #endif
2151 
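     /* Allocation for a call op: overflow arguments are stored to the stack
        first (STACK_DIR flips the offsets on hosts whose stack grows upward),
        the remaining arguments are placed in the target's call registers,
        dead temps are dropped, every call-clobbered register is freed, globals
        are saved or synced according to the call flags, the call is emitted,
        and finally the outputs are bound to the return registers. */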
2152 static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
2153                                const TCGArg * const args, uint16_t dead_args,
2154                                uint8_t sync_args)
2155 {
2156     int flags, nb_regs, i, reg;
2157     TCGArg arg;
2158     TCGTemp *ts;
2159     intptr_t stack_offset;
2160     size_t call_stack_size;
2161     tcg_insn_unit *func_addr;
2162     int allocate_args;
2163     TCGRegSet allocated_regs;
2164 
2165     func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs];
2166     flags = args[nb_oargs + nb_iargs + 1];
2167 
2168     nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
2169     if (nb_regs > nb_iargs) {
2170         nb_regs = nb_iargs;
2171     }
2172 
2173     /* assign stack slots first */
2174     call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
2175     call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
2176         ~(TCG_TARGET_STACK_ALIGN - 1);
2177     allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
2178     if (allocate_args) {
2179         /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
2180            preallocate call stack */
2181         tcg_abort();
2182     }
2183 
2184     stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
2185     for(i = nb_regs; i < nb_iargs; i++) {
2186         arg = args[nb_oargs + i];
2187 #ifdef TCG_TARGET_STACK_GROWSUP
2188         stack_offset -= sizeof(tcg_target_long);
2189 #endif
2190         if (arg != TCG_CALL_DUMMY_ARG) {
2191             ts = &s->temps[arg];
2192             if (ts->val_type == TEMP_VAL_REG) {
2193                 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
2194             } else if (ts->val_type == TEMP_VAL_MEM) {
2195                 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
2196                                     s->reserved_regs);
2197                 /* XXX: not correct if reading values from the stack */
2198                 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2199                 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
2200             } else if (ts->val_type == TEMP_VAL_CONST) {
2201                 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
2202                                     s->reserved_regs);
2203                 /* XXX: sign extend may be needed on some targets */
2204                 tcg_out_movi(s, ts->type, reg, ts->val);
2205                 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
2206             } else {
2207                 tcg_abort();
2208             }
2209         }
2210 #ifndef TCG_TARGET_STACK_GROWSUP
2211         stack_offset += sizeof(tcg_target_long);
2212 #endif
2213     }
2214 
2215     /* assign input registers */
2216     tcg_regset_set(allocated_regs, s->reserved_regs);
2217     for(i = 0; i < nb_regs; i++) {
2218         arg = args[nb_oargs + i];
2219         if (arg != TCG_CALL_DUMMY_ARG) {
2220             ts = &s->temps[arg];
2221             reg = tcg_target_call_iarg_regs[i];
2222             tcg_reg_free(s, reg);
2223             if (ts->val_type == TEMP_VAL_REG) {
2224                 if (ts->reg != reg) {
2225                     tcg_out_mov(s, ts->type, reg, ts->reg);
2226                 }
2227             } else if (ts->val_type == TEMP_VAL_MEM) {
2228                 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2229             } else if (ts->val_type == TEMP_VAL_CONST) {
2230                 /* XXX: sign extend ? */
2231                 tcg_out_movi(s, ts->type, reg, ts->val);
2232             } else {
2233                 tcg_abort();
2234             }
2235             tcg_regset_set_reg(allocated_regs, reg);
2236         }
2237     }
2238 
2239     /* mark dead temporaries and free the associated registers */
2240     for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2241         if (IS_DEAD_ARG(i)) {
2242             temp_dead(s, args[i]);
2243         }
2244     }
2245 
2246     /* clobber call registers */
2247     for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2248         if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
2249             tcg_reg_free(s, reg);
2250         }
2251     }
2252 
2253     /* Save globals if they might be written by the helper, sync them if
2254        they might be read. */
2255     if (flags & TCG_CALL_NO_READ_GLOBALS) {
2256         /* Nothing to do */
2257     } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
2258         sync_globals(s, allocated_regs);
2259     } else {
2260         save_globals(s, allocated_regs);
2261     }
2262 
2263     tcg_out_call(s, func_addr);
2264 
2265     /* assign output registers and emit moves if needed */
2266     for(i = 0; i < nb_oargs; i++) {
2267         arg = args[i];
2268         ts = &s->temps[arg];
2269         reg = tcg_target_call_oarg_regs[i];
2270         assert(s->reg_to_temp[reg] == -1);
2271 
2272         if (ts->fixed_reg) {
2273             if (ts->reg != reg) {
2274                 tcg_out_mov(s, ts->type, ts->reg, reg);
2275             }
2276         } else {
2277             if (ts->val_type == TEMP_VAL_REG) {
2278                 s->reg_to_temp[ts->reg] = -1;
2279             }
2280             ts->val_type = TEMP_VAL_REG;
2281             ts->reg = reg;
2282             ts->mem_coherent = 0;
2283             s->reg_to_temp[reg] = arg;
2284             if (NEED_SYNC_ARG(i)) {
2285                 tcg_reg_sync(s, reg);
2286             }
2287             if (IS_DEAD_ARG(i)) {
2288                 temp_dead(s, args[i]);
2289             }
2290         }
2291     }
2292 }
2293 
2294 #ifdef CONFIG_PROFILER
2295 
2296 static int64_t tcg_table_op_count[NB_OPS];
2297 
2298 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2299 {
2300     int i;
2301 
2302     for (i = 0; i < NB_OPS; i++) {
2303         cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
2304                     tcg_table_op_count[i]);
2305     }
2306 }
2307 #else
2308 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2309 {
2310     cpu_fprintf(f, "[TCG profiler not compiled]\n");
2311 }
2312 #endif
2313 
2314 
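     /* Translate the accumulated ops into host code at 'gen_code_buf'.
        Returns the size of the generated code, or -1 if the high-water mark
        of the code buffer was crossed, so the caller can make room and
        retry. */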
2315 int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf)
2316 {
2317     int i, oi, oi_next, num_insns;
2318 
2319 #ifdef CONFIG_PROFILER
2320     {
2321         int n;
2322 
2323         n = s->gen_last_op_idx + 1;
2324         s->op_count += n;
2325         if (n > s->op_count_max) {
2326             s->op_count_max = n;
2327         }
2328 
2329         n = s->nb_temps;
2330         s->temp_count += n;
2331         if (n > s->temp_count_max) {
2332             s->temp_count_max = n;
2333         }
2334     }
2335 #endif
2336 
2337 #ifdef DEBUG_DISAS
2338     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
2339         qemu_log("OP:\n");
2340         tcg_dump_ops(s);
2341         qemu_log("\n");
2342     }
2343 #endif
2344 
2345 #ifdef CONFIG_PROFILER
2346     s->opt_time -= profile_getclock();
2347 #endif
2348 
2349 #ifdef USE_TCG_OPTIMIZATIONS
2350     tcg_optimize(s);
2351 #endif
2352 
2353 #ifdef CONFIG_PROFILER
2354     s->opt_time += profile_getclock();
2355     s->la_time -= profile_getclock();
2356 #endif
2357 
2358     tcg_liveness_analysis(s);
2359 
2360 #ifdef CONFIG_PROFILER
2361     s->la_time += profile_getclock();
2362 #endif
2363 
2364 #ifdef DEBUG_DISAS
2365     if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
2366         qemu_log("OP after optimization and liveness analysis:\n");
2367         tcg_dump_ops(s);
2368         qemu_log("\n");
2369     }
2370 #endif
2371 
2372     tcg_reg_alloc_start(s);
2373 
2374     s->code_buf = gen_code_buf;
2375     s->code_ptr = gen_code_buf;
2376 
2377     tcg_out_tb_init(s);
2378 
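         /* Walk the ops in execution order (following the 'next' links) and
            emit host code for each one, recording per-guest-insn data at the
            insn_start markers for later restore. */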
2379     num_insns = -1;
2380     for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) {
2381         TCGOp * const op = &s->gen_op_buf[oi];
2382         TCGArg * const args = &s->gen_opparam_buf[op->args];
2383         TCGOpcode opc = op->opc;
2384         const TCGOpDef *def = &tcg_op_defs[opc];
2385         uint16_t dead_args = s->op_dead_args[oi];
2386         uint8_t sync_args = s->op_sync_args[oi];
2387 
2388         oi_next = op->next;
2389 #ifdef CONFIG_PROFILER
2390         tcg_table_op_count[opc]++;
2391 #endif
2392 
2393         switch (opc) {
2394         case INDEX_op_mov_i32:
2395         case INDEX_op_mov_i64:
2396             tcg_reg_alloc_mov(s, def, args, dead_args, sync_args);
2397             break;
2398         case INDEX_op_movi_i32:
2399         case INDEX_op_movi_i64:
2400             tcg_reg_alloc_movi(s, args, dead_args, sync_args);
2401             break;
2402         case INDEX_op_insn_start:
2403             if (num_insns >= 0) {
2404                 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2405             }
2406             num_insns++;
2407             for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
2408                 target_ulong a;
2409 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
2410                 a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
2411 #else
2412                 a = args[i];
2413 #endif
2414                 s->gen_insn_data[num_insns][i] = a;
2415             }
2416             break;
2417         case INDEX_op_discard:
2418             temp_dead(s, args[0]);
2419             break;
2420         case INDEX_op_set_label:
2421             tcg_reg_alloc_bb_end(s, s->reserved_regs);
2422             tcg_out_label(s, arg_label(args[0]), s->code_ptr);
2423             break;
2424         case INDEX_op_call:
2425             tcg_reg_alloc_call(s, op->callo, op->calli, args,
2426                                dead_args, sync_args);
2427             break;
2428         default:
2429             /* Sanity check that we've not introduced any unhandled opcodes. */
2430             if (def->flags & TCG_OPF_NOT_PRESENT) {
2431                 tcg_abort();
2432             }
2433             /* Note: it would be much faster to have specialized
2434                register allocator functions for some common argument
2435                patterns. */
2436             tcg_reg_alloc_op(s, def, opc, args, dead_args, sync_args);
2437             break;
2438         }
2439 #ifndef NDEBUG
2440         check_regs(s);
2441 #endif
2442         /* Test for (pending) buffer overflow.  The assumption is that any
2443            one operation beginning below the high water mark cannot overrun
2444            the buffer completely.  Thus we can test for overflow after
2445            generating code without having to check during generation.  */
2446         if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
2447             return -1;
2448         }
2449     }
2450     tcg_debug_assert(num_insns >= 0);
2451     s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2452 
2453     /* Generate TB finalization at the end of block */
2454     tcg_out_tb_finalize(s);
2455 
2456     /* flush instruction cache */
2457     flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
2458 
2459     return tcg_current_code_size(s);
2460 }
2461 
2462 #ifdef CONFIG_PROFILER
2463 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2464 {
2465     TCGContext *s = &tcg_ctx;
2466     int64_t tb_count = s->tb_count;
2467     int64_t tb_div_count = tb_count ? tb_count : 1;
2468     int64_t tot = s->interm_time + s->code_time;
2469 
2470     cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2471                 tot, tot / 2.4e9);
2472     cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2473                 tb_count, s->tb_count1 - tb_count,
2474                 (double)(s->tb_count1 - s->tb_count)
2475                 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
2476     cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
2477                 (double)s->op_count / tb_div_count, s->op_count_max);
2478     cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
2479                 (double)s->del_op_count / tb_div_count);
2480     cpu_fprintf(f, "avg temps/TB        %0.2f max=%d\n",
2481                 (double)s->temp_count / tb_div_count, s->temp_count_max);
2482     cpu_fprintf(f, "avg host code/TB    %0.1f\n",
2483                 (double)s->code_out_len / tb_div_count);
2484     cpu_fprintf(f, "avg search data/TB  %0.1f\n",
2485                 (double)s->search_out_len / tb_div_count);
2486 
2487     cpu_fprintf(f, "cycles/op           %0.1f\n",
2488                 s->op_count ? (double)tot / s->op_count : 0);
2489     cpu_fprintf(f, "cycles/in byte      %0.1f\n",
2490                 s->code_in_len ? (double)tot / s->code_in_len : 0);
2491     cpu_fprintf(f, "cycles/out byte     %0.1f\n",
2492                 s->code_out_len ? (double)tot / s->code_out_len : 0);
2493     cpu_fprintf(f, "cycles/search byte     %0.1f\n",
2494                 s->search_out_len ? (double)tot / s->search_out_len : 0);
2495     if (tot == 0) {
2496         tot = 1;
2497     }
2498     cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
2499                 (double)s->interm_time / tot * 100.0);
2500     cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
2501                 (double)s->code_time / tot * 100.0);
2502     cpu_fprintf(f, "optim./code time    %0.1f%%\n",
2503                 (double)s->opt_time / (s->code_time ? s->code_time : 1)
2504                 * 100.0);
2505     cpu_fprintf(f, "liveness/code time  %0.1f%%\n",
2506                 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
2507     cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
2508                 s->restore_count);
2509     cpu_fprintf(f, "  avg cycles        %0.1f\n",
2510                 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
2511 }
2512 #else
2513 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2514 {
2515     cpu_fprintf(f, "[TCG profiler not compiled]\n");
2516 }
2517 #endif
2518 
2519 #ifdef ELF_HOST_MACHINE
2520 /* In order to use this feature, the backend needs to do three things:
2521 
2522    (1) Define ELF_HOST_MACHINE to indicate both what value to
2523        put into the ELF image and to indicate support for the feature.
2524 
2525    (2) Define tcg_register_jit.  This should create a buffer containing
2526        the contents of a .debug_frame section that describes the post-
2527        prologue unwind info for the tcg machine.
2528 
2529    (3) Call tcg_register_jit_int, with the constructed .debug_frame.
2530 */
2531 
2532 /* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS.  */
2533 typedef enum {
2534     JIT_NOACTION = 0,
2535     JIT_REGISTER_FN,
2536     JIT_UNREGISTER_FN
2537 } jit_actions_t;
2538 
2539 struct jit_code_entry {
2540     struct jit_code_entry *next_entry;
2541     struct jit_code_entry *prev_entry;
2542     const void *symfile_addr;
2543     uint64_t symfile_size;
2544 };
2545 
2546 struct jit_descriptor {
2547     uint32_t version;
2548     uint32_t action_flag;
2549     struct jit_code_entry *relevant_entry;
2550     struct jit_code_entry *first_entry;
2551 };
2552 
2553 void __jit_debug_register_code(void) __attribute__((noinline));
2554 void __jit_debug_register_code(void)
2555 {
2556     asm("");
2557 }
2558 
2559 /* Must statically initialize the version, because GDB may check
2560    the version before we can set it.  */
2561 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
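     /* Per the GDB JIT interface, the debugger plants a breakpoint inside
        __jit_debug_register_code; every time it fires, GDB re-reads
        __jit_debug_descriptor and loads or unloads the in-memory ELF image
        referenced by relevant_entry according to action_flag. */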
2562 
2563 /* End GDB interface.  */
2564 
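     /* Return the offset of 'str' inside the string table 'strtab'.  The
        string is assumed to be present (all lookups below use names that are
        part of img_template.str); otherwise the loop would run off the end. */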
2565 static int find_string(const char *strtab, const char *str)
2566 {
2567     const char *p = strtab + 1;
2568 
2569     while (1) {
2570         if (strcmp(p, str) == 0) {
2571             return p - strtab;
2572         }
2573         p += strlen(p) + 1;
2574     }
2575 }
2576 
2577 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
2578                                  const void *debug_frame,
2579                                  size_t debug_frame_size)
2580 {
2581     struct __attribute__((packed)) DebugInfo {
2582         uint32_t  len;
2583         uint16_t  version;
2584         uint32_t  abbrev;
2585         uint8_t   ptr_size;
2586         uint8_t   cu_die;
2587         uint16_t  cu_lang;
2588         uintptr_t cu_low_pc;
2589         uintptr_t cu_high_pc;
2590         uint8_t   fn_die;
2591         char      fn_name[16];
2592         uintptr_t fn_low_pc;
2593         uintptr_t fn_high_pc;
2594         uint8_t   cu_eoc;
2595     };
2596 
2597     struct ElfImage {
2598         ElfW(Ehdr) ehdr;
2599         ElfW(Phdr) phdr;
2600         ElfW(Shdr) shdr[7];
2601         ElfW(Sym)  sym[2];
2602         struct DebugInfo di;
2603         uint8_t    da[24];
2604         char       str[80];
2605     };
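         /* The image handed to GDB is a struct ElfImage followed immediately
            by the backend-supplied .debug_frame contents; hence
            shdr[4].sh_offset == sizeof(struct ElfImage) and the memcpy into
            (img + 1) further down. */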
2606 
2607     struct ElfImage *img;
2608 
2609     static const struct ElfImage img_template = {
2610         .ehdr = {
2611             .e_ident[EI_MAG0] = ELFMAG0,
2612             .e_ident[EI_MAG1] = ELFMAG1,
2613             .e_ident[EI_MAG2] = ELFMAG2,
2614             .e_ident[EI_MAG3] = ELFMAG3,
2615             .e_ident[EI_CLASS] = ELF_CLASS,
2616             .e_ident[EI_DATA] = ELF_DATA,
2617             .e_ident[EI_VERSION] = EV_CURRENT,
2618             .e_type = ET_EXEC,
2619             .e_machine = ELF_HOST_MACHINE,
2620             .e_version = EV_CURRENT,
2621             .e_phoff = offsetof(struct ElfImage, phdr),
2622             .e_shoff = offsetof(struct ElfImage, shdr),
2623             .e_ehsize = sizeof(ElfW(Ehdr)),
2624             .e_phentsize = sizeof(ElfW(Phdr)),
2625             .e_phnum = 1,
2626             .e_shentsize = sizeof(ElfW(Shdr)),
2627             .e_shnum = ARRAY_SIZE(img->shdr),
2628             .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
2629 #ifdef ELF_HOST_FLAGS
2630             .e_flags = ELF_HOST_FLAGS,
2631 #endif
2632 #ifdef ELF_OSABI
2633             .e_ident[EI_OSABI] = ELF_OSABI,
2634 #endif
2635         },
2636         .phdr = {
2637             .p_type = PT_LOAD,
2638             .p_flags = PF_X,
2639         },
2640         .shdr = {
2641             [0] = { .sh_type = SHT_NULL },
2642             /* Trick: The contents of code_gen_buffer are not present in
2643                this fake ELF file; that got allocated elsewhere.  Therefore
2644                we mark .text as SHT_NOBITS (similar to .bss) so that readers
2645                will not look for contents.  We can record any address.  */
2646             [1] = { /* .text */
2647                 .sh_type = SHT_NOBITS,
2648                 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
2649             },
2650             [2] = { /* .debug_info */
2651                 .sh_type = SHT_PROGBITS,
2652                 .sh_offset = offsetof(struct ElfImage, di),
2653                 .sh_size = sizeof(struct DebugInfo),
2654             },
2655             [3] = { /* .debug_abbrev */
2656                 .sh_type = SHT_PROGBITS,
2657                 .sh_offset = offsetof(struct ElfImage, da),
2658                 .sh_size = sizeof(img->da),
2659             },
2660             [4] = { /* .debug_frame */
2661                 .sh_type = SHT_PROGBITS,
2662                 .sh_offset = sizeof(struct ElfImage),
2663             },
2664             [5] = { /* .symtab */
2665                 .sh_type = SHT_SYMTAB,
2666                 .sh_offset = offsetof(struct ElfImage, sym),
2667                 .sh_size = sizeof(img->sym),
2668                 .sh_info = 1,
2669                 .sh_link = ARRAY_SIZE(img->shdr) - 1,
2670                 .sh_entsize = sizeof(ElfW(Sym)),
2671             },
2672             [6] = { /* .strtab */
2673                 .sh_type = SHT_STRTAB,
2674                 .sh_offset = offsetof(struct ElfImage, str),
2675                 .sh_size = sizeof(img->str),
2676             }
2677         },
2678         .sym = {
2679             [1] = { /* code_gen_buffer */
2680                 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
2681                 .st_shndx = 1,
2682             }
2683         },
2684         .di = {
2685             .len = sizeof(struct DebugInfo) - 4,
2686             .version = 2,
2687             .ptr_size = sizeof(void *),
2688             .cu_die = 1,
2689             .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
2690             .fn_die = 2,
2691             .fn_name = "code_gen_buffer"
2692         },
2693         .da = {
2694             1,          /* abbrev number (the cu) */
2695             0x11, 1,    /* DW_TAG_compile_unit, has children */
2696             0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
2697             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
2698             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
2699             0, 0,       /* end of abbrev */
2700             2,          /* abbrev number (the fn) */
2701             0x2e, 0,    /* DW_TAG_subprogram, no children */
2702             0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
2703             0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
2704             0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
2705             0, 0,       /* end of abbrev */
2706             0           /* no more abbrev */
2707         },
2708         .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
2709                ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
2710     };
2711 
2712     /* We only need a single jit entry; statically allocate it.  */
2713     static struct jit_code_entry one_entry;
2714 
2715     uintptr_t buf = (uintptr_t)buf_ptr;
2716     size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2717     DebugFrameHeader *dfh;
2718 
2719     img = g_malloc(img_size);
2720     *img = img_template;
2721 
2722     img->phdr.p_vaddr = buf;
2723     img->phdr.p_paddr = buf;
2724     img->phdr.p_memsz = buf_size;
2725 
2726     img->shdr[1].sh_name = find_string(img->str, ".text");
2727     img->shdr[1].sh_addr = buf;
2728     img->shdr[1].sh_size = buf_size;
2729 
2730     img->shdr[2].sh_name = find_string(img->str, ".debug_info");
2731     img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
2732 
2733     img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
2734     img->shdr[4].sh_size = debug_frame_size;
2735 
2736     img->shdr[5].sh_name = find_string(img->str, ".symtab");
2737     img->shdr[6].sh_name = find_string(img->str, ".strtab");
2738 
2739     img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
2740     img->sym[1].st_value = buf;
2741     img->sym[1].st_size = buf_size;
2742 
2743     img->di.cu_low_pc = buf;
2744     img->di.cu_high_pc = buf + buf_size;
2745     img->di.fn_low_pc = buf;
2746     img->di.fn_high_pc = buf + buf_size;
2747 
2748     dfh = (DebugFrameHeader *)(img + 1);
2749     memcpy(dfh, debug_frame, debug_frame_size);
2750     dfh->fde.func_start = buf;
2751     dfh->fde.func_len = buf_size;
2752 
2753 #ifdef DEBUG_JIT
2754     /* Enable this block to be able to debug the ELF image file creation.
2755        One can use readelf, objdump, or other inspection utilities.  */
2756     {
2757         FILE *f = fopen("/tmp/qemu.jit", "w+b");
2758         if (f) {
2759             if (fwrite(img, img_size, 1, f) != 1) {
2760                 /* Avoid stupid unused return value warning for fwrite.  */
2761             }
2762             fclose(f);
2763         }
2764     }
2765 #endif
2766 
2767     one_entry.symfile_addr = img;
2768     one_entry.symfile_size = img_size;
2769 
2770     __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
2771     __jit_debug_descriptor.relevant_entry = &one_entry;
2772     __jit_debug_descriptor.first_entry = &one_entry;
2773     __jit_debug_register_code();
2774 }
2775 #else
2776 /* No support for the feature.  Provide the entry point expected by exec.c,
2777    and implement the internal function we declared earlier.  */
2778 
2779 static void tcg_register_jit_int(void *buf, size_t size,
2780                                  const void *debug_frame,
2781                                  size_t debug_frame_size)
2782 {
2783 }
2784 
2785 void tcg_register_jit(void *buf, size_t buf_size)
2786 {
2787 }
2788 #endif /* ELF_HOST_MACHINE */
2789