xref: /qemu/accel/tcg/translate-all.c (revision 7271a819)
1 /*
2  *  Host code generation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #ifdef _WIN32
20 #include <windows.h>
21 #endif
22 #include "qemu/osdep.h"
23 
24 
25 #include "qemu-common.h"
26 #define NO_CPU_IO_DEFS
27 #include "cpu.h"
28 #include "trace.h"
29 #include "disas/disas.h"
30 #include "exec/exec-all.h"
31 #include "tcg.h"
32 #if defined(CONFIG_USER_ONLY)
33 #include "qemu.h"
34 #include "exec/exec-all.h"
35 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
36 #include <sys/param.h>
37 #if __FreeBSD_version >= 700104
38 #define HAVE_KINFO_GETVMMAP
39 #define sigqueue sigqueue_freebsd  /* avoid redefinition */
40 #include <sys/proc.h>
41 #include <machine/profile.h>
42 #define _KERNEL
43 #include <sys/user.h>
44 #undef _KERNEL
45 #undef sigqueue
46 #include <libutil.h>
47 #endif
48 #endif
49 #else
50 #include "exec/address-spaces.h"
51 #endif
52 
53 #include "exec/cputlb.h"
54 #include "exec/tb-hash.h"
55 #include "translate-all.h"
56 #include "qemu/bitmap.h"
57 #include "qemu/error-report.h"
58 #include "qemu/timer.h"
59 #include "qemu/main-loop.h"
60 #include "exec/log.h"
61 #include "sysemu/cpus.h"
62 
63 /* #define DEBUG_TB_INVALIDATE */
64 /* #define DEBUG_TB_FLUSH */
65 /* make various TB consistency checks */
66 /* #define DEBUG_TB_CHECK */
67 
68 #ifdef DEBUG_TB_INVALIDATE
69 #define DEBUG_TB_INVALIDATE_GATE 1
70 #else
71 #define DEBUG_TB_INVALIDATE_GATE 0
72 #endif
73 
74 #ifdef DEBUG_TB_FLUSH
75 #define DEBUG_TB_FLUSH_GATE 1
76 #else
77 #define DEBUG_TB_FLUSH_GATE 0
78 #endif
79 
80 #if !defined(CONFIG_USER_ONLY)
81 /* TB consistency checks only implemented for usermode emulation.  */
82 #undef DEBUG_TB_CHECK
83 #endif
84 
85 #ifdef DEBUG_TB_CHECK
86 #define DEBUG_TB_CHECK_GATE 1
87 #else
88 #define DEBUG_TB_CHECK_GATE 0
89 #endif
90 
91 /* Access to the various translation structures needs to be serialised
92  * via locks for consistency.  For SoftMMU-based system emulation the
93  * memory-related structures are protected by tb_lock (see
94  * assert_memory_lock below); in user-mode emulation they are protected
95  * by the mmap_lock.
96  */
97 #ifdef CONFIG_SOFTMMU
98 #define assert_memory_lock() tcg_debug_assert(have_tb_lock)
99 #else
100 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
101 #endif
102 
103 #define SMC_BITMAP_USE_THRESHOLD 10
104 
105 typedef struct PageDesc {
106     /* list of TBs intersecting this ram page */
107     TranslationBlock *first_tb;
108 #ifdef CONFIG_SOFTMMU
109     /* In order to optimize self-modifying code, we count the writes to a
110        given page; past SMC_BITMAP_USE_THRESHOLD we switch to a bitmap. */
111     unsigned int code_write_count;
112     unsigned long *code_bitmap;
113 #else
114     unsigned long flags;
115 #endif
116 } PageDesc;
117 
118 /* In system mode we want L1_MAP to be based on ram offsets,
119    while in user mode we want it to be based on virtual addresses.  */
120 #if !defined(CONFIG_USER_ONLY)
121 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
122 # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
123 #else
124 # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
125 #endif
126 #else
127 # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
128 #endif
129 
130 /* Size of the L2 (and L3, etc) page tables.  */
131 #define V_L2_BITS 10
132 #define V_L2_SIZE (1 << V_L2_BITS)
133 
134 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
135 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
136                   sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
137                   * BITS_PER_BYTE);
138 
139 /*
140  * L1 Mapping properties
141  */
142 static int v_l1_size;
143 static int v_l1_shift;
144 static int v_l2_levels;
145 
146 /* The bottom level has pointers to PageDesc, and is indexed by
147  * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
148  */
149 #define V_L1_MIN_BITS 4
150 #define V_L1_MAX_BITS (V_L2_BITS + 3)
151 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
152 
153 static void *l1_map[V_L1_MAX_SIZE];
154 
155 /* code generation context */
156 TCGContext tcg_ctx;
157 bool parallel_cpus;
158 
159 /* translation block context */
160 static __thread int have_tb_lock;
161 
162 static void page_table_config_init(void)
163 {
164     uint32_t v_l1_bits;
165 
166     assert(TARGET_PAGE_BITS);
167     /* The bits remaining after N lower levels of page tables.  */
168     v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
169     if (v_l1_bits < V_L1_MIN_BITS) {
170         v_l1_bits += V_L2_BITS;
171     }
172 
173     v_l1_size = 1 << v_l1_bits;
174     v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
175     v_l2_levels = v_l1_shift / V_L2_BITS - 1;
176 
177     assert(v_l1_bits <= V_L1_MAX_BITS);
178     assert(v_l1_shift % V_L2_BITS == 0);
179     assert(v_l2_levels >= 0);
180 }
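
/* Worked example added for clarity (not from the original source): for a
 * user-mode guest with TARGET_VIRT_ADDR_SPACE_BITS == 32 and
 * TARGET_PAGE_BITS == 12, so L1_MAP_ADDR_SPACE_BITS == 32 and V_L2_BITS == 10:
 *   v_l1_bits   = (32 - 12) % 10 = 0, raised to 10 (below V_L1_MIN_BITS)
 *   v_l1_size   = 1 << 10 = 1024 entries in l1_map
 *   v_l1_shift  = 32 - 12 - 10 = 10
 *   v_l2_levels = 10 / 10 - 1 = 0, i.e. each l1_map entry points directly
 *                 at a bottom-level block of PageDescs.
 */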
181 
182 #define assert_tb_locked() tcg_debug_assert(have_tb_lock)
183 #define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)
184 
185 void tb_lock(void)
186 {
187     assert_tb_unlocked();
188     qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
189     have_tb_lock++;
190 }
191 
192 void tb_unlock(void)
193 {
194     assert_tb_locked();
195     have_tb_lock--;
196     qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
197 }
198 
199 void tb_lock_reset(void)
200 {
201     if (have_tb_lock) {
202         qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
203         have_tb_lock = 0;
204     }
205 }
206 
207 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
208 
209 void cpu_gen_init(void)
210 {
211     tcg_context_init(&tcg_ctx);
212 }
213 
214 /* Encode VAL as a signed leb128 sequence at P.
215    Return P incremented past the encoded value.  */
216 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
217 {
218     int more, byte;
219 
220     do {
221         byte = val & 0x7f;
222         val >>= 7;
223         more = !((val == 0 && (byte & 0x40) == 0)
224                  || (val == -1 && (byte & 0x40) != 0));
225         if (more) {
226             byte |= 0x80;
227         }
228         *p++ = byte;
229     } while (more);
230 
231     return p;
232 }
233 
234 /* Decode a signed leb128 sequence at *PP; increment *PP past the
235    decoded value.  Return the decoded value.  */
236 static target_long decode_sleb128(uint8_t **pp)
237 {
238     uint8_t *p = *pp;
239     target_long val = 0;
240     int byte, shift = 0;
241 
242     do {
243         byte = *p++;
244         val |= (target_ulong)(byte & 0x7f) << shift;
245         shift += 7;
246     } while (byte & 0x80);
247     if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
248         val |= -(target_ulong)1 << shift;
249     }
250 
251     *pp = p;
252     return val;
253 }
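
/* Minimal round-trip sketch of the two helpers above, kept out of the build;
 * the helper name is hypothetical and the byte values are standard signed
 * LEB128 (300 encodes as 0xac 0x02, -2 as the single byte 0x7e).
 */
#if 0
static void sleb128_roundtrip_example(void)
{
    uint8_t buf[16], *end, *p = buf;

    end = encode_sleb128(buf, 300);      /* writes 0xac, 0x02 */
    assert(end - buf == 2);
    assert(decode_sleb128(&p) == 300);   /* p now points just past the value */
    assert(p == end);
}
#endif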
254 
255 /* Encode the data collected about the instructions while compiling TB.
256    Place the data at BLOCK, and return the number of bytes consumed.
257 
258    The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
259    which come from the target's insn_start data, followed by a uintptr_t
260    which comes from the host pc of the end of the code implementing the insn.
261 
262    Each line of the table is encoded as sleb128 deltas from the previous
263    line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
264    That is, the first column is seeded with the guest pc, the last column
265    with the host pc, and the middle columns with zeros.  */
266 
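/* Illustrative example (numbers invented): a TB at guest pc 0x1000 with two
 * 4-byte insns whose generated code ends at tb->tc.ptr + 40 and
 * tb->tc.ptr + 72 is stored as the delta rows { 0, ..., +40 } and
 * { 4, ..., +32 }; any extra target-specific insn_start words are encoded
 * the same way, seeded with zero.
 */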
267 static int encode_search(TranslationBlock *tb, uint8_t *block)
268 {
269     uint8_t *highwater = tcg_ctx.code_gen_highwater;
270     uint8_t *p = block;
271     int i, j, n;
272 
273     tb->tc.search = block;
274 
275     for (i = 0, n = tb->icount; i < n; ++i) {
276         target_ulong prev;
277 
278         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
279             if (i == 0) {
280                 prev = (j == 0 ? tb->pc : 0);
281             } else {
282                 prev = tcg_ctx.gen_insn_data[i - 1][j];
283             }
284             p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
285         }
286         prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
287         p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);
288 
289         /* Test for (pending) buffer overflow.  The assumption is that any
290            one row beginning below the high water mark cannot overrun
291            the buffer completely.  Thus we can test for overflow after
292            encoding a row without having to check during encoding.  */
293         if (unlikely(p > highwater)) {
294             return -1;
295         }
296     }
297 
298     return p - block;
299 }
300 
301 /* The cpu state corresponding to 'searched_pc' is restored.
302  * Called with tb_lock held.
303  */
304 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
305                                      uintptr_t searched_pc)
306 {
307     target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
308     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
309     CPUArchState *env = cpu->env_ptr;
310     uint8_t *p = tb->tc.search;
311     int i, j, num_insns = tb->icount;
312 #ifdef CONFIG_PROFILER
313     int64_t ti = profile_getclock();
314 #endif
315 
316     searched_pc -= GETPC_ADJ;
317 
318     if (searched_pc < host_pc) {
319         return -1;
320     }
321 
322     /* Reconstruct the stored insn data while looking for the point at
323        which the end of the insn exceeds the searched_pc.  */
324     for (i = 0; i < num_insns; ++i) {
325         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
326             data[j] += decode_sleb128(&p);
327         }
328         host_pc += decode_sleb128(&p);
329         if (host_pc > searched_pc) {
330             goto found;
331         }
332     }
333     return -1;
334 
335  found:
336     if (tb->cflags & CF_USE_ICOUNT) {
337         assert(use_icount);
338         /* Reset the cycle counter to the start of the block.  */
339         cpu->icount_decr.u16.low += num_insns;
340         /* Clear the IO flag.  */
341         cpu->can_do_io = 0;
342     }
343     cpu->icount_decr.u16.low -= i;
344     restore_state_to_opc(env, tb, data);
345 
346 #ifdef CONFIG_PROFILER
347     tcg_ctx.restore_time += profile_getclock() - ti;
348     tcg_ctx.restore_count++;
349 #endif
350     return 0;
351 }
352 
353 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
354 {
355     TranslationBlock *tb;
356     bool r = false;
357 
358     /* A retaddr of zero is invalid so we really shouldn't have ended
359      * up here. The target code has likely forgotten to check retaddr
360      * != 0 before attempting to restore state. We return early to
361      * avoid blowing up on a recursive tb_lock(). The target must have
362      * previously survived a failed cpu_restore_state because
363      * tb_find_pc(0) would have failed anyway. It still should be
364      * fixed though.
365      */
366 
367     if (!retaddr) {
368         return r;
369     }
370 
371     tb_lock();
372     tb = tb_find_pc(retaddr);
373     if (tb) {
374         cpu_restore_state_from_tb(cpu, tb, retaddr);
375         if (tb->cflags & CF_NOCACHE) {
376             /* one-shot translation, invalidate it immediately */
377             tb_phys_invalidate(tb, -1);
378             tb_free(tb);
379         }
380         r = true;
381     }
382     tb_unlock();
383 
384     return r;
385 }
386 
387 static void page_init(void)
388 {
389     page_size_init();
390     page_table_config_init();
391 
392 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
393     {
394 #ifdef HAVE_KINFO_GETVMMAP
395         struct kinfo_vmentry *freep;
396         int i, cnt;
397 
398         freep = kinfo_getvmmap(getpid(), &cnt);
399         if (freep) {
400             mmap_lock();
401             for (i = 0; i < cnt; i++) {
402                 unsigned long startaddr, endaddr;
403 
404                 startaddr = freep[i].kve_start;
405                 endaddr = freep[i].kve_end;
406                 if (h2g_valid(startaddr)) {
407                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
408 
409                     if (h2g_valid(endaddr)) {
410                         endaddr = h2g(endaddr);
411                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
412                     } else {
413 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
414                         endaddr = ~0ul;
415                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
416 #endif
417                     }
418                 }
419             }
420             free(freep);
421             mmap_unlock();
422         }
423 #else
424         FILE *f;
425 
426         last_brk = (unsigned long)sbrk(0);
427 
428         f = fopen("/compat/linux/proc/self/maps", "r");
429         if (f) {
430             mmap_lock();
431 
432             do {
433                 unsigned long startaddr, endaddr;
434                 int n;
435 
436                 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
437 
438                 if (n == 2 && h2g_valid(startaddr)) {
439                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
440 
441                     if (h2g_valid(endaddr)) {
442                         endaddr = h2g(endaddr);
443                     } else {
444                         endaddr = ~0ul;
445                     }
446                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
447                 }
448             } while (!feof(f));
449 
450             fclose(f);
451             mmap_unlock();
452         }
453 #endif
454     }
455 #endif
456 }
457 
458 /* If alloc=1:
459  * Called with tb_lock held for system emulation.
460  * Called with mmap_lock held for user-mode emulation.
461  */
462 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
463 {
464     PageDesc *pd;
465     void **lp;
466     int i;
467 
468     if (alloc) {
469         assert_memory_lock();
470     }
471 
472     /* Level 1.  Always allocated.  */
473     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
474 
475     /* Level 2..N-1.  */
476     for (i = v_l2_levels; i > 0; i--) {
477         void **p = atomic_rcu_read(lp);
478 
479         if (p == NULL) {
480             if (!alloc) {
481                 return NULL;
482             }
483             p = g_new0(void *, V_L2_SIZE);
484             atomic_rcu_set(lp, p);
485         }
486 
487         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
488     }
489 
490     pd = atomic_rcu_read(lp);
491     if (pd == NULL) {
492         if (!alloc) {
493             return NULL;
494         }
495         pd = g_new0(PageDesc, V_L2_SIZE);
496         atomic_rcu_set(lp, pd);
497     }
498 
499     return pd + (index & (V_L2_SIZE - 1));
500 }
501 
502 static inline PageDesc *page_find(tb_page_addr_t index)
503 {
504     return page_find_alloc(index, 0);
505 }
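
/* Hedged usage sketch (the helper below is hypothetical, not part of QEMU):
 * callers index the map by page number, i.e. the address shifted down by
 * TARGET_PAGE_BITS, and must tolerate a NULL result when no PageDesc has
 * been allocated for that page yet.
 */
#if 0
static bool page_has_code_example(tb_page_addr_t addr)
{
    PageDesc *pd = page_find(addr >> TARGET_PAGE_BITS);

    return pd != NULL && pd->first_tb != NULL;
}
#endif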
506 
507 #if defined(CONFIG_USER_ONLY)
508 /* Currently it is not recommended to allocate big chunks of data in
509    user mode. This will change when a dedicated libc is used.  */
510 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
511    region in which the guest needs to run.  Revisit this.  */
512 #define USE_STATIC_CODE_GEN_BUFFER
513 #endif
514 
515 /* Minimum size of the code gen buffer.  This number is arbitrarily chosen,
516    but not so small that we can't have a fair number of TBs live.  */
517 #define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
518 
519 /* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
520    indicated, this is constrained by the range of direct branches on the
521    host cpu, as used by the TCG implementation of goto_tb.  */
522 #if defined(__x86_64__)
523 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
524 #elif defined(__sparc__)
525 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
526 #elif defined(__powerpc64__)
527 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
528 #elif defined(__powerpc__)
529 # define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
530 #elif defined(__aarch64__)
531 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
532 #elif defined(__s390x__)
533   /* We have a +- 4GB range on the branches; leave some slop.  */
534 # define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
535 #elif defined(__mips__)
536   /* We have a 256MB branch region, but leave room to make sure the
537      main executable is also within that region.  */
538 # define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
539 #else
540 # define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
541 #endif
542 
543 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
544 
545 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
546   (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
547    ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
548 
549 static inline size_t size_code_gen_buffer(size_t tb_size)
550 {
551     /* Size the buffer.  */
552     if (tb_size == 0) {
553 #ifdef USE_STATIC_CODE_GEN_BUFFER
554         tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
555 #else
556         /* ??? Needs adjustments.  */
557         /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
558            static buffer, we could size this on RESERVED_VA, on the text
559            segment size of the executable, or continue to use the default.  */
560         tb_size = (unsigned long)(ram_size / 4);
561 #endif
562     }
563     if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
564         tb_size = MIN_CODE_GEN_BUFFER_SIZE;
565     }
566     if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
567         tb_size = MAX_CODE_GEN_BUFFER_SIZE;
568     }
569     return tb_size;
570 }
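
/* Worked example of the clamping above: with the static buffer in use, a
 * tb_size of 0 yields DEFAULT_CODE_GEN_BUFFER_SIZE (32 MiB); a request of
 * 512 KiB is raised to MIN_CODE_GEN_BUFFER_SIZE (1 MiB); and a request of
 * 8 GiB on an x86-64 host is clamped to MAX_CODE_GEN_BUFFER_SIZE (2 GiB).
 */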
571 
572 #ifdef __mips__
573 /* In order to use J and JAL within the code_gen_buffer, we require
574    that the buffer not cross a 256MB boundary.  */
575 static inline bool cross_256mb(void *addr, size_t size)
576 {
577     return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
578 }
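
/* Worked example (addresses invented): buf = 0x0ff00000, size = 0x00200000.
 * Then buf + size = 0x10100000, buf ^ (buf + size) = 0x1fe00000, and masking
 * with ~0x0ffffffful leaves 0x10000000, i.e. non-zero: the range crosses a
 * 256MB boundary and must be trimmed with split_cross_256mb() below.
 */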
579 
580 /* We weren't able to allocate a buffer without crossing that boundary,
581    so make do with the larger portion of the buffer that doesn't cross.
582    Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
583 static inline void *split_cross_256mb(void *buf1, size_t size1)
584 {
585     void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
586     size_t size2 = buf1 + size1 - buf2;
587 
588     size1 = buf2 - buf1;
589     if (size1 < size2) {
590         size1 = size2;
591         buf1 = buf2;
592     }
593 
594     tcg_ctx.code_gen_buffer_size = size1;
595     return buf1;
596 }
597 #endif
598 
599 #ifdef USE_STATIC_CODE_GEN_BUFFER
600 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
601     __attribute__((aligned(CODE_GEN_ALIGN)));
602 
603 # ifdef _WIN32
604 static inline void do_protect(void *addr, long size, int prot)
605 {
606     DWORD old_protect;
607     VirtualProtect(addr, size, prot, &old_protect);
608 }
609 
610 static inline void map_exec(void *addr, long size)
611 {
612     do_protect(addr, size, PAGE_EXECUTE_READWRITE);
613 }
614 
615 static inline void map_none(void *addr, long size)
616 {
617     do_protect(addr, size, PAGE_NOACCESS);
618 }
619 # else
620 static inline void do_protect(void *addr, long size, int prot)
621 {
622     uintptr_t start, end;
623 
624     start = (uintptr_t)addr;
625     start &= qemu_real_host_page_mask;
626 
627     end = (uintptr_t)addr + size;
628     end = ROUND_UP(end, qemu_real_host_page_size);
629 
630     mprotect((void *)start, end - start, prot);
631 }
632 
633 static inline void map_exec(void *addr, long size)
634 {
635     do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
636 }
637 
638 static inline void map_none(void *addr, long size)
639 {
640     do_protect(addr, size, PROT_NONE);
641 }
642 # endif /* WIN32 */
643 
644 static inline void *alloc_code_gen_buffer(void)
645 {
646     void *buf = static_code_gen_buffer;
647     size_t full_size, size;
648 
649     /* The size of the buffer, rounded down to end on a page boundary.  */
650     full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
651                  & qemu_real_host_page_mask) - (uintptr_t)buf;
652 
653     /* Reserve a guard page.  */
654     size = full_size - qemu_real_host_page_size;
655 
656     /* Honor a command-line option limiting the size of the buffer.  */
657     if (size > tcg_ctx.code_gen_buffer_size) {
658         size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
659                 & qemu_real_host_page_mask) - (uintptr_t)buf;
660     }
661     tcg_ctx.code_gen_buffer_size = size;
662 
663 #ifdef __mips__
664     if (cross_256mb(buf, size)) {
665         buf = split_cross_256mb(buf, size);
666         size = tcg_ctx.code_gen_buffer_size;
667     }
668 #endif
669 
670     map_exec(buf, size);
671     map_none(buf + size, qemu_real_host_page_size);
672     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
673 
674     return buf;
675 }
676 #elif defined(_WIN32)
677 static inline void *alloc_code_gen_buffer(void)
678 {
679     size_t size = tcg_ctx.code_gen_buffer_size;
680     void *buf1, *buf2;
681 
682     /* Perform the allocation in two steps, so that the guard page
683        is reserved but uncommitted.  */
684     buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
685                         MEM_RESERVE, PAGE_NOACCESS);
686     if (buf1 != NULL) {
687         buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
688         assert(buf1 == buf2);
689     }
690 
691     return buf1;
692 }
693 #else
694 static inline void *alloc_code_gen_buffer(void)
695 {
696     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
697     uintptr_t start = 0;
698     size_t size = tcg_ctx.code_gen_buffer_size;
699     void *buf;
700 
701     /* Constrain the position of the buffer based on the host cpu.
702        Note that these addresses are chosen in concert with the
703        addresses assigned in the relevant linker script file.  */
704 # if defined(__PIE__) || defined(__PIC__)
705     /* Don't bother setting a preferred location if we're building
706        a position-independent executable.  We're more likely to get
707        an address near the main executable if we let the kernel
708        choose the address.  */
709 # elif defined(__x86_64__) && defined(MAP_32BIT)
710     /* Force the memory down into low memory with the executable.
711        Leave the choice of exact location with the kernel.  */
712     flags |= MAP_32BIT;
713     /* Cannot expect to map more than 800MB in low memory.  */
714     if (size > 800u * 1024 * 1024) {
715         tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
716     }
717 # elif defined(__sparc__)
718     start = 0x40000000ul;
719 # elif defined(__s390x__)
720     start = 0x90000000ul;
721 # elif defined(__mips__)
722 #  if _MIPS_SIM == _ABI64
723     start = 0x128000000ul;
724 #  else
725     start = 0x08000000ul;
726 #  endif
727 # endif
728 
729     buf = mmap((void *)start, size + qemu_real_host_page_size,
730                PROT_NONE, flags, -1, 0);
731     if (buf == MAP_FAILED) {
732         return NULL;
733     }
734 
735 #ifdef __mips__
736     if (cross_256mb(buf, size)) {
737         /* Try again, with the original still mapped, to avoid re-acquiring
738            that 256mb crossing.  This time don't specify an address.  */
739         size_t size2;
740         void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
741                           PROT_NONE, flags, -1, 0);
742         switch ((int)(buf2 != MAP_FAILED)) {
743         case 1:
744             if (!cross_256mb(buf2, size)) {
745                 /* Success!  Use the new buffer.  */
746                 munmap(buf, size + qemu_real_host_page_size);
747                 break;
748             }
749             /* Failure.  Work with what we had.  */
750             munmap(buf2, size + qemu_real_host_page_size);
751             /* fallthru */
752         default:
753             /* Split the original buffer.  Free the smaller half.  */
754             buf2 = split_cross_256mb(buf, size);
755             size2 = tcg_ctx.code_gen_buffer_size;
756             if (buf == buf2) {
757                 munmap(buf + size2 + qemu_real_host_page_size, size - size2);
758             } else {
759                 munmap(buf, size - size2);
760             }
761             size = size2;
762             break;
763         }
764         buf = buf2;
765     }
766 #endif
767 
768     /* Make the final buffer accessible.  The guard page at the end
769        will remain inaccessible with PROT_NONE.  */
770     mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);
771 
772     /* Request large pages for the buffer.  */
773     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
774 
775     return buf;
776 }
777 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
778 
779 static inline void code_gen_alloc(size_t tb_size)
780 {
781     tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
782     tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
783     if (tcg_ctx.code_gen_buffer == NULL) {
784         fprintf(stderr, "Could not allocate dynamic translator buffer\n");
785         exit(1);
786     }
787 
788     /* size this conservatively -- realloc later if needed */
789     tcg_ctx.tb_ctx.tbs_size =
790         tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE / 8;
791     if (unlikely(!tcg_ctx.tb_ctx.tbs_size)) {
792         tcg_ctx.tb_ctx.tbs_size = 64 * 1024;
793     }
794     tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock *, tcg_ctx.tb_ctx.tbs_size);
795 
796     qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
797 }
798 
799 static void tb_htable_init(void)
800 {
801     unsigned int mode = QHT_MODE_AUTO_RESIZE;
802 
803     qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
804 }
805 
806 /* Must be called before using the QEMU cpus. 'tb_size' is the size
807    (in bytes) allocated to the translation buffer. Zero means default
808    size. */
809 void tcg_exec_init(unsigned long tb_size)
810 {
811     tcg_allowed = true;
812     cpu_gen_init();
813     page_init();
814     tb_htable_init();
815     code_gen_alloc(tb_size);
816 #if defined(CONFIG_SOFTMMU)
817     /* There's no guest base to take into account, so go ahead and
818        initialize the prologue now.  */
819     tcg_prologue_init(&tcg_ctx);
820 #endif
821 }
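
/* Hedged usage sketch (hypothetical wrapper; the real call sites live
 * outside this file, in the system and user-mode start-up code): passing 0
 * selects the default buffer size computed by size_code_gen_buffer().
 */
#if 0
static void tcg_accel_example_setup(void)
{
    tcg_exec_init(0);                   /* default-sized translation buffer */
    /* or: tcg_exec_init(64 * 1024 * 1024);   explicit 64 MiB request */
}
#endif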
822 
823 /*
824  * Allocate a new translation block. Flush the translation buffer if
825  * too many translation blocks or too much generated code.
826  *
827  * Called with tb_lock held.
828  */
829 static TranslationBlock *tb_alloc(target_ulong pc)
830 {
831     TranslationBlock *tb;
832     TBContext *ctx;
833 
834     assert_tb_locked();
835 
836     tb = tcg_tb_alloc(&tcg_ctx);
837     if (unlikely(tb == NULL)) {
838         return NULL;
839     }
840     ctx = &tcg_ctx.tb_ctx;
841     if (unlikely(ctx->nb_tbs == ctx->tbs_size)) {
842         ctx->tbs_size *= 2;
843         ctx->tbs = g_renew(TranslationBlock *, ctx->tbs, ctx->tbs_size);
844     }
845     ctx->tbs[ctx->nb_tbs++] = tb;
846     return tb;
847 }
848 
849 /* Called with tb_lock held.  */
850 void tb_free(TranslationBlock *tb)
851 {
852     assert_tb_locked();
853 
854     /* In practice this is mostly used for single-use temporary TBs.
855        Ignore the hard cases and just back up if this TB happens to
856        be the last one generated.  */
857     if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
858             tb == tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
859         size_t struct_size = ROUND_UP(sizeof(*tb), qemu_icache_linesize);
860 
861         tcg_ctx.code_gen_ptr = tb->tc.ptr - struct_size;
862         tcg_ctx.tb_ctx.nb_tbs--;
863     }
864 }
865 
866 static inline void invalidate_page_bitmap(PageDesc *p)
867 {
868 #ifdef CONFIG_SOFTMMU
869     g_free(p->code_bitmap);
870     p->code_bitmap = NULL;
871     p->code_write_count = 0;
872 #endif
873 }
874 
875 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
876 static void page_flush_tb_1(int level, void **lp)
877 {
878     int i;
879 
880     if (*lp == NULL) {
881         return;
882     }
883     if (level == 0) {
884         PageDesc *pd = *lp;
885 
886         for (i = 0; i < V_L2_SIZE; ++i) {
887             pd[i].first_tb = NULL;
888             invalidate_page_bitmap(pd + i);
889         }
890     } else {
891         void **pp = *lp;
892 
893         for (i = 0; i < V_L2_SIZE; ++i) {
894             page_flush_tb_1(level - 1, pp + i);
895         }
896     }
897 }
898 
899 static void page_flush_tb(void)
900 {
901     int i, l1_sz = v_l1_size;
902 
903     for (i = 0; i < l1_sz; i++) {
904         page_flush_tb_1(v_l2_levels, l1_map + i);
905     }
906 }
907 
908 /* flush all the translation blocks */
909 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
910 {
911     tb_lock();
912 
913     /* If it has already been done at the request of another CPU,
914      * just retry.
915      */
916     if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
917         goto done;
918     }
919 
920     if (DEBUG_TB_FLUSH_GATE) {
921         printf("qemu: flush code_size=%td nb_tbs=%d avg_tb_size=%td\n",
922                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
923                tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
924                (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) /
925                tcg_ctx.tb_ctx.nb_tbs : 0);
926     }
927     if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
928         > tcg_ctx.code_gen_buffer_size) {
929         cpu_abort(cpu, "Internal error: code buffer overflow\n");
930     }
931 
932     CPU_FOREACH(cpu) {
933         cpu_tb_jmp_cache_clear(cpu);
934     }
935 
936     tcg_ctx.tb_ctx.nb_tbs = 0;
937     qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
938     page_flush_tb();
939 
940     tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
941     /* XXX: flush processor icache at this point if cache flush is
942        expensive */
943     atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
944                   tcg_ctx.tb_ctx.tb_flush_count + 1);
945 
946 done:
947     tb_unlock();
948 }
949 
950 void tb_flush(CPUState *cpu)
951 {
952     if (tcg_enabled()) {
953         unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
954         async_safe_run_on_cpu(cpu, do_tb_flush,
955                               RUN_ON_CPU_HOST_INT(tb_flush_count));
956     }
957 }
958 
959 /*
960  * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
961  * so in order to prevent bit rot we compile them unconditionally in user-mode,
962  * and let the optimizer get rid of them by wrapping their user-only callers
963  * with if (DEBUG_TB_CHECK_GATE).
964  */
965 #ifdef CONFIG_USER_ONLY
966 
967 static void
968 do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
969 {
970     TranslationBlock *tb = p;
971     target_ulong addr = *(target_ulong *)userp;
972 
973     if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
974         printf("ERROR invalidate: address=" TARGET_FMT_lx
975                " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
976     }
977 }
978 
979 /* verify that all the pages have correct rights for code
980  *
981  * Called with tb_lock held.
982  */
983 static void tb_invalidate_check(target_ulong address)
984 {
985     address &= TARGET_PAGE_MASK;
986     qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
987 }
988 
989 static void
990 do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
991 {
992     TranslationBlock *tb = p;
993     int flags1, flags2;
994 
995     flags1 = page_get_flags(tb->pc);
996     flags2 = page_get_flags(tb->pc + tb->size - 1);
997     if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
998         printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
999                (long)tb->pc, tb->size, flags1, flags2);
1000     }
1001 }
1002 
1003 /* verify that all the pages have correct rights for code */
1004 static void tb_page_check(void)
1005 {
1006     qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
1007 }
1008 
1009 #endif /* CONFIG_USER_ONLY */
1010 
1011 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
1012 {
1013     TranslationBlock *tb1;
1014     unsigned int n1;
1015 
1016     for (;;) {
1017         tb1 = *ptb;
1018         n1 = (uintptr_t)tb1 & 3;
1019         tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1020         if (tb1 == tb) {
1021             *ptb = tb1->page_next[n1];
1022             break;
1023         }
1024         ptb = &tb1->page_next[n1];
1025     }
1026 }
1027 
1028 /* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
1029 static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
1030 {
1031     TranslationBlock *tb1;
1032     uintptr_t *ptb, ntb;
1033     unsigned int n1;
1034 
1035     ptb = &tb->jmp_list_next[n];
1036     if (*ptb) {
1037         /* find tb(n) in circular list */
1038         for (;;) {
1039             ntb = *ptb;
1040             n1 = ntb & 3;
1041             tb1 = (TranslationBlock *)(ntb & ~3);
1042             if (n1 == n && tb1 == tb) {
1043                 break;
1044             }
1045             if (n1 == 2) {
1046                 ptb = &tb1->jmp_list_first;
1047             } else {
1048                 ptb = &tb1->jmp_list_next[n1];
1049             }
1050         }
1051         /* now we can remove tb(n) from the list */
1052         *ptb = tb->jmp_list_next[n];
1053 
1054         tb->jmp_list_next[n] = (uintptr_t)NULL;
1055     }
1056 }
1057 
1058 /* reset the jump entry 'n' of a TB so that it is not chained to
1059    another TB */
1060 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1061 {
1062     uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1063     tb_set_jmp_target(tb, n, addr);
1064 }
1065 
1066 /* remove any jumps to the TB */
1067 static inline void tb_jmp_unlink(TranslationBlock *tb)
1068 {
1069     TranslationBlock *tb1;
1070     uintptr_t *ptb, ntb;
1071     unsigned int n1;
1072 
1073     ptb = &tb->jmp_list_first;
1074     for (;;) {
1075         ntb = *ptb;
1076         n1 = ntb & 3;
1077         tb1 = (TranslationBlock *)(ntb & ~3);
1078         if (n1 == 2) {
1079             break;
1080         }
1081         tb_reset_jump(tb1, n1);
1082         *ptb = tb1->jmp_list_next[n1];
1083         tb1->jmp_list_next[n1] = (uintptr_t)NULL;
1084     }
1085 }
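
/* Note on the pointer tagging used by the jump lists above: the low two bits
 * of each list word select which field of the pointed-to TB continues the
 * chain -- 0 or 1 for jmp_list_next[0]/[1], while 2 marks jmp_list_first,
 * i.e. the head of the circular list (see the "| 2" initialisation in
 * tb_gen_code() below).
 */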
1086 
1087 /* invalidate one TB
1088  *
1089  * Called with tb_lock held.
1090  */
1091 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1092 {
1093     CPUState *cpu;
1094     PageDesc *p;
1095     uint32_t h;
1096     tb_page_addr_t phys_pc;
1097 
1098     assert_tb_locked();
1099 
1100     atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1101 
1102     /* remove the TB from the hash list */
1103     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1104     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
1105     qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
1106 
1107     /* remove the TB from the page list */
1108     if (tb->page_addr[0] != page_addr) {
1109         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1110         tb_page_remove(&p->first_tb, tb);
1111         invalidate_page_bitmap(p);
1112     }
1113     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
1114         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1115         tb_page_remove(&p->first_tb, tb);
1116         invalidate_page_bitmap(p);
1117     }
1118 
1119     /* remove the TB from the per-CPU tb_jmp_cache */
1120     h = tb_jmp_cache_hash_func(tb->pc);
1121     CPU_FOREACH(cpu) {
1122         if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1123             atomic_set(&cpu->tb_jmp_cache[h], NULL);
1124         }
1125     }
1126 
1127     /* remove this TB from the two jump lists */
1128     tb_remove_from_jmp_list(tb, 0);
1129     tb_remove_from_jmp_list(tb, 1);
1130 
1131     /* remove any remaining jumps to this TB */
1132     tb_jmp_unlink(tb);
1133 
1134     tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
1135 }
1136 
1137 #ifdef CONFIG_SOFTMMU
1138 static void build_page_bitmap(PageDesc *p)
1139 {
1140     int n, tb_start, tb_end;
1141     TranslationBlock *tb;
1142 
1143     p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1144 
1145     tb = p->first_tb;
1146     while (tb != NULL) {
1147         n = (uintptr_t)tb & 3;
1148         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1149         /* NOTE: this is subtle as a TB may span two physical pages */
1150         if (n == 0) {
1151             /* NOTE: tb_end may be after the end of the page, but
1152                it is not a problem */
1153             tb_start = tb->pc & ~TARGET_PAGE_MASK;
1154             tb_end = tb_start + tb->size;
1155             if (tb_end > TARGET_PAGE_SIZE) {
1156                 tb_end = TARGET_PAGE_SIZE;
1157             }
1158         } else {
1159             tb_start = 0;
1160             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1161         }
1162         bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1163         tb = tb->page_next[n];
1164     }
1165 }
1166 #endif
1167 
1168 /* add the tb to the target page and protect it if necessary
1169  *
1170  * Called with mmap_lock held for user-mode emulation.
1171  */
1172 static inline void tb_alloc_page(TranslationBlock *tb,
1173                                  unsigned int n, tb_page_addr_t page_addr)
1174 {
1175     PageDesc *p;
1176 #ifndef CONFIG_USER_ONLY
1177     bool page_already_protected;
1178 #endif
1179 
1180     assert_memory_lock();
1181 
1182     tb->page_addr[n] = page_addr;
1183     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1184     tb->page_next[n] = p->first_tb;
1185 #ifndef CONFIG_USER_ONLY
1186     page_already_protected = p->first_tb != NULL;
1187 #endif
1188     p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1189     invalidate_page_bitmap(p);
1190 
1191 #if defined(CONFIG_USER_ONLY)
1192     if (p->flags & PAGE_WRITE) {
1193         target_ulong addr;
1194         PageDesc *p2;
1195         int prot;
1196 
1197         /* force the host page to be non-writable (writes will take a
1198            page fault + mprotect overhead) */
1199         page_addr &= qemu_host_page_mask;
1200         prot = 0;
1201         for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1202             addr += TARGET_PAGE_SIZE) {
1203 
1204             p2 = page_find(addr >> TARGET_PAGE_BITS);
1205             if (!p2) {
1206                 continue;
1207             }
1208             prot |= p2->flags;
1209             p2->flags &= ~PAGE_WRITE;
1210         }
1211         mprotect(g2h(page_addr), qemu_host_page_size,
1212                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1213         if (DEBUG_TB_INVALIDATE_GATE) {
1214             printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1215         }
1216     }
1217 #else
1218     /* if some code is already present, then the pages are already
1219        protected. So we handle the case where only the first TB is
1220        allocated in a physical page */
1221     if (!page_already_protected) {
1222         tlb_protect_code(page_addr);
1223     }
1224 #endif
1225 }
1226 
1227 /* add a new TB and link it to the physical page tables. phys_page2 is
1228  * (-1) to indicate that only one page contains the TB.
1229  *
1230  * Called with mmap_lock held for user-mode emulation.
1231  */
1232 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1233                          tb_page_addr_t phys_page2)
1234 {
1235     uint32_t h;
1236 
1237     assert_memory_lock();
1238 
1239     /* add in the page list */
1240     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1241     if (phys_page2 != -1) {
1242         tb_alloc_page(tb, 1, phys_page2);
1243     } else {
1244         tb->page_addr[1] = -1;
1245     }
1246 
1247     /* add in the hash table */
1248     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
1249     qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
1250 
1251 #ifdef CONFIG_USER_ONLY
1252     if (DEBUG_TB_CHECK_GATE) {
1253         tb_page_check();
1254     }
1255 #endif
1256 }
1257 
1258 /* Called with mmap_lock held for user mode emulation.  */
1259 TranslationBlock *tb_gen_code(CPUState *cpu,
1260                               target_ulong pc, target_ulong cs_base,
1261                               uint32_t flags, int cflags)
1262 {
1263     CPUArchState *env = cpu->env_ptr;
1264     TranslationBlock *tb;
1265     tb_page_addr_t phys_pc, phys_page2;
1266     target_ulong virt_page2;
1267     tcg_insn_unit *gen_code_buf;
1268     int gen_code_size, search_size;
1269 #ifdef CONFIG_PROFILER
1270     int64_t ti;
1271 #endif
1272     assert_memory_lock();
1273 
1274     phys_pc = get_page_addr_code(env, pc);
1275     if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
1276         cflags |= CF_USE_ICOUNT;
1277     }
1278 
1279     tb = tb_alloc(pc);
1280     if (unlikely(!tb)) {
1281  buffer_overflow:
1282         /* flush must be done */
1283         tb_flush(cpu);
1284         mmap_unlock();
1285         /* Make the execution loop process the flush as soon as possible.  */
1286         cpu->exception_index = EXCP_INTERRUPT;
1287         cpu_loop_exit(cpu);
1288     }
1289 
1290     gen_code_buf = tcg_ctx.code_gen_ptr;
1291     tb->tc.ptr = gen_code_buf;
1292     tb->pc = pc;
1293     tb->cs_base = cs_base;
1294     tb->flags = flags;
1295     tb->cflags = cflags;
1296     tb->trace_vcpu_dstate = *cpu->trace_dstate;
1297 
1298 #ifdef CONFIG_PROFILER
1299     tcg_ctx.tb_count1++; /* includes aborted translations because of
1300                        exceptions */
1301     ti = profile_getclock();
1302 #endif
1303 
1304     tcg_func_start(&tcg_ctx);
1305 
1306     tcg_ctx.cpu = ENV_GET_CPU(env);
1307     gen_intermediate_code(cpu, tb);
1308     tcg_ctx.cpu = NULL;
1309 
1310     trace_translate_block(tb, tb->pc, tb->tc.ptr);
1311 
1312     /* generate machine code */
1313     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1314     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1315     tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
1316     if (TCG_TARGET_HAS_direct_jump) {
1317         tcg_ctx.tb_jmp_insn_offset = tb->jmp_target_arg;
1318         tcg_ctx.tb_jmp_target_addr = NULL;
1319     } else {
1320         tcg_ctx.tb_jmp_insn_offset = NULL;
1321         tcg_ctx.tb_jmp_target_addr = tb->jmp_target_arg;
1322     }
1323 
1324 #ifdef CONFIG_PROFILER
1325     tcg_ctx.tb_count++;
1326     tcg_ctx.interm_time += profile_getclock() - ti;
1327     ti = profile_getclock();
1328 #endif
1329 
1330     /* ??? Overflow could be handled better here.  In particular, we
1331        don't need to re-do gen_intermediate_code, nor should we re-do
1332        the tcg optimization currently hidden inside tcg_gen_code.  All
1333        that should be required is to flush the TBs, allocate a new TB,
1334        re-initialize it per above, and re-do the actual code generation.  */
1335     gen_code_size = tcg_gen_code(&tcg_ctx, tb);
1336     if (unlikely(gen_code_size < 0)) {
1337         goto buffer_overflow;
1338     }
1339     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1340     if (unlikely(search_size < 0)) {
1341         goto buffer_overflow;
1342     }
1343 
1344 #ifdef CONFIG_PROFILER
1345     tcg_ctx.code_time += profile_getclock() - ti;
1346     tcg_ctx.code_in_len += tb->size;
1347     tcg_ctx.code_out_len += gen_code_size;
1348     tcg_ctx.search_out_len += search_size;
1349 #endif
1350 
1351 #ifdef DEBUG_DISAS
1352     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1353         qemu_log_in_addr_range(tb->pc)) {
1354         qemu_log_lock();
1355         qemu_log("OUT: [size=%d]\n", gen_code_size);
1356         if (tcg_ctx.data_gen_ptr) {
1357             size_t code_size = tcg_ctx.data_gen_ptr - tb->tc.ptr;
1358             size_t data_size = gen_code_size - code_size;
1359             size_t i;
1360 
1361             log_disas(tb->tc.ptr, code_size);
1362 
1363             for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1364                 if (sizeof(tcg_target_ulong) == 8) {
1365                     qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
1366                              (uintptr_t)tcg_ctx.data_gen_ptr + i,
1367                              *(uint64_t *)(tcg_ctx.data_gen_ptr + i));
1368                 } else {
1369                     qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
1370                              (uintptr_t)tcg_ctx.data_gen_ptr + i,
1371                              *(uint32_t *)(tcg_ctx.data_gen_ptr + i));
1372                 }
1373             }
1374         } else {
1375             log_disas(tb->tc.ptr, gen_code_size);
1376         }
1377         qemu_log("\n");
1378         qemu_log_flush();
1379         qemu_log_unlock();
1380     }
1381 #endif
1382 
1383     tcg_ctx.code_gen_ptr = (void *)
1384         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1385                  CODE_GEN_ALIGN);
1386 
1387     /* init jump list */
1388     assert(((uintptr_t)tb & 3) == 0);
1389     tb->jmp_list_first = (uintptr_t)tb | 2;
1390     tb->jmp_list_next[0] = (uintptr_t)NULL;
1391     tb->jmp_list_next[1] = (uintptr_t)NULL;
1392 
1393     /* init original jump addresses which have been set during tcg_gen_code() */
1394     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1395         tb_reset_jump(tb, 0);
1396     }
1397     if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1398         tb_reset_jump(tb, 1);
1399     }
1400 
1401     /* check next page if needed */
1402     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1403     phys_page2 = -1;
1404     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1405         phys_page2 = get_page_addr_code(env, virt_page2);
1406     }
1407     /* As long as consistency of the TB stuff is provided by tb_lock in user
1408      * mode and is implicit in single-threaded softmmu emulation, no explicit
1409      * memory barrier is required before tb_link_page() makes the TB visible
1410      * through the physical hash table and physical page list.
1411      */
1412     tb_link_page(tb, phys_pc, phys_page2);
1413     return tb;
1414 }
1415 
1416 /*
1417  * Invalidate all TBs which intersect with the target physical address range
1418  * [start, end). NOTE: start and end may refer to *different* physical pages.
1419  * 'is_cpu_write_access' should be true if called from a real cpu write
1420  * access: the virtual CPU will exit the current TB if code is modified inside
1421  * this TB.
1422  *
1423  * Called with mmap_lock held for user-mode emulation, grabs tb_lock
1424  * Called with tb_lock held for system-mode emulation
1425  */
1426 static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
1427 {
1428     while (start < end) {
1429         tb_invalidate_phys_page_range(start, end, 0);
1430         start &= TARGET_PAGE_MASK;
1431         start += TARGET_PAGE_SIZE;
1432     }
1433 }
1434 
1435 #ifdef CONFIG_SOFTMMU
1436 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1437 {
1438     assert_tb_locked();
1439     tb_invalidate_phys_range_1(start, end);
1440 }
1441 #else
1442 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1443 {
1444     assert_memory_lock();
1445     tb_lock();
1446     tb_invalidate_phys_range_1(start, end);
1447     tb_unlock();
1448 }
1449 #endif
1450 /*
1451  * Invalidate all TBs which intersect with the target physical address range
1452  * [start, end). NOTE: start and end must refer to the *same* physical page.
1453  * 'is_cpu_write_access' should be true if called from a real cpu write
1454  * access: the virtual CPU will exit the current TB if code is modified inside
1455  * this TB.
1456  *
1457  * Called with tb_lock/mmap_lock held for user-mode emulation
1458  * Called with tb_lock held for system-mode emulation
1459  */
1460 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1461                                    int is_cpu_write_access)
1462 {
1463     TranslationBlock *tb, *tb_next;
1464 #if defined(TARGET_HAS_PRECISE_SMC)
1465     CPUState *cpu = current_cpu;
1466     CPUArchState *env = NULL;
1467 #endif
1468     tb_page_addr_t tb_start, tb_end;
1469     PageDesc *p;
1470     int n;
1471 #ifdef TARGET_HAS_PRECISE_SMC
1472     int current_tb_not_found = is_cpu_write_access;
1473     TranslationBlock *current_tb = NULL;
1474     int current_tb_modified = 0;
1475     target_ulong current_pc = 0;
1476     target_ulong current_cs_base = 0;
1477     uint32_t current_flags = 0;
1478 #endif /* TARGET_HAS_PRECISE_SMC */
1479 
1480     assert_memory_lock();
1481     assert_tb_locked();
1482 
1483     p = page_find(start >> TARGET_PAGE_BITS);
1484     if (!p) {
1485         return;
1486     }
1487 #if defined(TARGET_HAS_PRECISE_SMC)
1488     if (cpu != NULL) {
1489         env = cpu->env_ptr;
1490     }
1491 #endif
1492 
1493     /* we remove all the TBs in the range [start, end) */
1494     /* XXX: see if in some cases it could be faster to invalidate all
1495        the code */
1496     tb = p->first_tb;
1497     while (tb != NULL) {
1498         n = (uintptr_t)tb & 3;
1499         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1500         tb_next = tb->page_next[n];
1501         /* NOTE: this is subtle as a TB may span two physical pages */
1502         if (n == 0) {
1503             /* NOTE: tb_end may be after the end of the page, but
1504                it is not a problem */
1505             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1506             tb_end = tb_start + tb->size;
1507         } else {
1508             tb_start = tb->page_addr[1];
1509             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1510         }
1511         if (!(tb_end <= start || tb_start >= end)) {
1512 #ifdef TARGET_HAS_PRECISE_SMC
1513             if (current_tb_not_found) {
1514                 current_tb_not_found = 0;
1515                 current_tb = NULL;
1516                 if (cpu->mem_io_pc) {
1517                     /* now we have a real cpu fault */
1518                     current_tb = tb_find_pc(cpu->mem_io_pc);
1519                 }
1520             }
1521             if (current_tb == tb &&
1522                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1523                 /* If we are modifying the current TB, we must stop
1524                 its execution. We could be more precise by checking
1525                 that the modification is after the current PC, but it
1526                 would require a specialized function to partially
1527                 restore the CPU state */
1528 
1529                 current_tb_modified = 1;
1530                 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1531                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1532                                      &current_flags);
1533             }
1534 #endif /* TARGET_HAS_PRECISE_SMC */
1535             tb_phys_invalidate(tb, -1);
1536         }
1537         tb = tb_next;
1538     }
1539 #if !defined(CONFIG_USER_ONLY)
1540     /* if no code remains, there is no need to keep using slow writes */
1541     if (!p->first_tb) {
1542         invalidate_page_bitmap(p);
1543         tlb_unprotect_code(start);
1544     }
1545 #endif
1546 #ifdef TARGET_HAS_PRECISE_SMC
1547     if (current_tb_modified) {
1548         /* we generate a block containing just the instruction
1549            modifying the memory. It will ensure that it cannot modify
1550            itself */
1551         tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1552         cpu_loop_exit_noexc(cpu);
1553     }
1554 #endif
1555 }
1556 
1557 #ifdef CONFIG_SOFTMMU
1558 /* len must be <= 8 and start must be a multiple of len.
1559  * Called via softmmu_template.h when code areas are written to with
1560  * iothread mutex not held.
1561  */
1562 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1563 {
1564     PageDesc *p;
1565 
1566 #if 0
1567     if (1) {
1568         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1569                   cpu_single_env->mem_io_vaddr, len,
1570                   cpu_single_env->eip,
1571                   cpu_single_env->eip +
1572                   (intptr_t)cpu_single_env->segs[R_CS].base);
1573     }
1574 #endif
1575     assert_memory_lock();
1576 
1577     p = page_find(start >> TARGET_PAGE_BITS);
1578     if (!p) {
1579         return;
1580     }
1581     if (!p->code_bitmap &&
1582         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1583         /* build code bitmap.  FIXME: writes should be protected by
1584          * tb_lock, reads by tb_lock or RCU.
1585          */
1586         build_page_bitmap(p);
1587     }
1588     if (p->code_bitmap) {
1589         unsigned int nr;
1590         unsigned long b;
1591 
1592         nr = start & ~TARGET_PAGE_MASK;
1593         b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1594         if (b & ((1 << len) - 1)) {
1595             goto do_invalidate;
1596         }
1597     } else {
1598     do_invalidate:
1599         tb_invalidate_phys_page_range(start, start + len, 1);
1600     }
1601 }
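
/* Worked example (offsets invented): if the page's code bitmap only has bits
 * set for offsets 0x000..0x0ff and a 4-byte write lands at offset 0x800,
 * then nr = 0x800, b is the bitmap word shifted so that bit 0 corresponds to
 * offset 0x800, and b & ((1 << 4) - 1) is zero -- the write cannot touch
 * translated code, so no invalidation is needed.
 */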
1602 #else
1603 /* Called with mmap_lock held. If pc is not 0 then it indicates the
1604  * host PC of the faulting store instruction that caused this invalidate.
1605  * Returns true if the caller needs to abort execution of the current
1606  * TB (because it was modified by this store and the guest CPU has
1607  * precise-SMC semantics).
1608  */
1609 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1610 {
1611     TranslationBlock *tb;
1612     PageDesc *p;
1613     int n;
1614 #ifdef TARGET_HAS_PRECISE_SMC
1615     TranslationBlock *current_tb = NULL;
1616     CPUState *cpu = current_cpu;
1617     CPUArchState *env = NULL;
1618     int current_tb_modified = 0;
1619     target_ulong current_pc = 0;
1620     target_ulong current_cs_base = 0;
1621     uint32_t current_flags = 0;
1622 #endif
1623 
1624     assert_memory_lock();
1625 
1626     addr &= TARGET_PAGE_MASK;
1627     p = page_find(addr >> TARGET_PAGE_BITS);
1628     if (!p) {
1629         return false;
1630     }
1631 
1632     tb_lock();
1633     tb = p->first_tb;
1634 #ifdef TARGET_HAS_PRECISE_SMC
1635     if (tb && pc != 0) {
1636         current_tb = tb_find_pc(pc);
1637     }
1638     if (cpu != NULL) {
1639         env = cpu->env_ptr;
1640     }
1641 #endif
1642     while (tb != NULL) {
1643         n = (uintptr_t)tb & 3;
1644         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1645 #ifdef TARGET_HAS_PRECISE_SMC
1646         if (current_tb == tb &&
1647             (current_tb->cflags & CF_COUNT_MASK) != 1) {
1648                 /* If we are modifying the current TB, we must stop
1649                    its execution. We could be more precise by checking
1650                    that the modification is after the current PC, but it
1651                    would require a specialized function to partially
1652                    restore the CPU state */
1653 
1654             current_tb_modified = 1;
1655             cpu_restore_state_from_tb(cpu, current_tb, pc);
1656             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1657                                  &current_flags);
1658         }
1659 #endif /* TARGET_HAS_PRECISE_SMC */
1660         tb_phys_invalidate(tb, addr);
1661         tb = tb->page_next[n];
1662     }
1663     p->first_tb = NULL;
1664 #ifdef TARGET_HAS_PRECISE_SMC
1665     if (current_tb_modified) {
1666         /* Generate a block containing just the instruction that
1667            modified the memory; this ensures the new TB cannot
1668            modify itself. */
1669         tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1670         /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
1671          * back into the cpu_exec loop. */
1672         return true;
1673     }
1674 #endif
1675     tb_unlock();
1676 
1677     return false;
1678 }
1679 #endif
1680 
1681 /* find the TB 'tb' such that tb[0].tc.ptr <= tc_ptr <
1682    tb[1].tc.ptr. Return NULL if not found */
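/* The tbs[] array is filled in allocation order and host code is generated
 * linearly into code_gen_buffer, so the tc.ptr values are monotonically
 * increasing; that ordering is what makes the binary search below valid.
 */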
1683 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1684 {
1685     int m_min, m_max, m;
1686     uintptr_t v;
1687     TranslationBlock *tb;
1688 
1689     if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1690         return NULL;
1691     }
1692     if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1693         tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1694         return NULL;
1695     }
1696     /* binary search (cf Knuth) */
1697     m_min = 0;
1698     m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1699     while (m_min <= m_max) {
1700         m = (m_min + m_max) >> 1;
1701         tb = tcg_ctx.tb_ctx.tbs[m];
1702         v = (uintptr_t)tb->tc.ptr;
1703         if (v == tc_ptr) {
1704             return tb;
1705         } else if (tc_ptr < v) {
1706             m_max = m - 1;
1707         } else {
1708             m_min = m + 1;
1709         }
1710     }
1711     return tcg_ctx.tb_ctx.tbs[m_max];
1712 }
1713 
1714 #if !defined(CONFIG_USER_ONLY)
1715 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1716 {
1717     ram_addr_t ram_addr;
1718     MemoryRegion *mr;
1719     hwaddr l = 1;
1720 
1721     rcu_read_lock();
1722     mr = address_space_translate(as, addr, &addr, &l, false);
1723     if (!(memory_region_is_ram(mr)
1724           || memory_region_is_romd(mr))) {
1725         rcu_read_unlock();
1726         return;
1727     }
1728     ram_addr = memory_region_get_ram_addr(mr) + addr;
1729     tb_lock();
1730     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1731     tb_unlock();
1732     rcu_read_unlock();
1733 }
1734 #endif /* !defined(CONFIG_USER_ONLY) */
1735 
1736 /* Called with tb_lock held.  */
1737 void tb_check_watchpoint(CPUState *cpu)
1738 {
1739     TranslationBlock *tb;
1740 
1741     tb = tb_find_pc(cpu->mem_io_pc);
1742     if (tb) {
1743         /* We can use retranslation to find the PC.  */
1744         cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1745         tb_phys_invalidate(tb, -1);
1746     } else {
1747         /* The exception probably happened in a helper.  The CPU state should
1748            have been saved before calling it. Fetch the PC from there.  */
1749         CPUArchState *env = cpu->env_ptr;
1750         target_ulong pc, cs_base;
1751         tb_page_addr_t addr;
1752         uint32_t flags;
1753 
1754         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1755         addr = get_page_addr_code(env, pc);
1756         tb_invalidate_phys_range(addr, addr + 1);
1757     }
1758 }
1759 
1760 #ifndef CONFIG_USER_ONLY
1761 /* In deterministic (icount) execution mode, an instruction that performs
1762  * device I/O must be the last instruction in its TB.
1763  *
1764  * Called by softmmu_template.h, with iothread mutex not held.
1765  */
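/* This recompiles the current TB so that it ends with the I/O instruction
 * and is flagged CF_LAST_IO, then restarts execution from the new TB so the
 * access is replayed as the final, properly accounted instruction.
 */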
1766 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1767 {
1768 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
1769     CPUArchState *env = cpu->env_ptr;
1770 #endif
1771     TranslationBlock *tb;
1772     uint32_t n, cflags;
1773     target_ulong pc, cs_base;
1774     uint32_t flags;
1775 
1776     tb_lock();
1777     tb = tb_find_pc(retaddr);
1778     if (!tb) {
1779         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1780                   (void *)retaddr);
1781     }
1782     n = cpu->icount_decr.u16.low + tb->icount;
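    /* icount_decr.u16.low was decremented by tb->icount by the check at the
     * head of the TB, so adding tb->icount back recovers the icount value at
     * TB entry; cpu_restore_state_from_tb() then re-adjusts it to charge only
     * the instructions actually executed before the faulting access.
     */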
1783     cpu_restore_state_from_tb(cpu, tb, retaddr);
1784     /* Calculate how many instructions had been executed before the fault
1785        occurred.  */
1786     n = n - cpu->icount_decr.u16.low;
1787     /* Generate a new TB ending on the I/O insn.  */
1788     n++;
1789     /* On MIPS and SH, delay slot instructions can only be restarted if
1790        they were already the first instruction in the TB.  If this is not
1791        the first instruction in a TB then re-execute the preceding
1792        branch.  */
1793 #if defined(TARGET_MIPS)
1794     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1795         env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
1796         cpu->icount_decr.u16.low++;
1797         env->hflags &= ~MIPS_HFLAG_BMASK;
1798     }
1799 #elif defined(TARGET_SH4)
1800     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1801             && n > 1) {
1802         env->pc -= 2;
1803         cpu->icount_decr.u16.low++;
1804         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1805     }
1806 #endif
1807     /* This should never happen.  */
1808     if (n > CF_COUNT_MASK) {
1809         cpu_abort(cpu, "TB too big during recompile");
1810     }
1811 
1812     cflags = n | CF_LAST_IO;
1813     pc = tb->pc;
1814     cs_base = tb->cs_base;
1815     flags = tb->flags;
1816     tb_phys_invalidate(tb, -1);
1817     if (tb->cflags & CF_NOCACHE) {
1818         if (tb->orig_tb) {
1819             /* Invalidate original TB if this TB was generated in
1820              * cpu_exec_nocache() */
1821             tb_phys_invalidate(tb->orig_tb, -1);
1822         }
1823         tb_free(tb);
1824     }
1825     /* FIXME: In theory this could raise an exception.  In practice
1826        we have already translated the block once so it's probably ok.  */
1827     tb_gen_code(cpu, pc, cs_base, flags, cflags);
1828 
1829     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1830      * the first in the TB) then we end up generating a whole new TB and
1831      * repeating the fault, which is horribly inefficient.
1832      * Better would be to execute just this insn uncached, or generate a
1833      * second new TB.
1834      *
1835      * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
1836      * tb_lock gets reset.
1837      */
1838     cpu_loop_exit_noexc(cpu);
1839 }
1840 
1841 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
1842 {
1843     unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
1844 
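    /* With the softmmu jump-cache hash, all entries whose PC lies on the
     * same guest page share one contiguous run of TB_JMP_PAGE_SIZE slots,
     * so clearing that run drops every cached TB for the page.
     */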
1845     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
1846         atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
1847     }
1848 }
1849 
1850 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1851 {
1852     /* Discard jump cache entries for any tb which might overlap the
1853        flushed page.  */
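    /* The jump cache is indexed by a TB's start PC, and a TB may extend
     * past its first page, so a TB that starts on the preceding page can
     * still contain code from the flushed page.
     */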
1854     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
1855     tb_jmp_cache_clear_page(cpu, addr);
1856 }
1857 
1858 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1859                                  struct qht_stats hst)
1860 {
1861     uint32_t hgram_opts;
1862     size_t hgram_bins;
1863     char *hgram;
1864 
1865     if (!hst.head_buckets) {
1866         return;
1867     }
1868     cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
1869                 hst.used_head_buckets, hst.head_buckets,
1870                 (double)hst.used_head_buckets / hst.head_buckets * 100);
1871 
1872     hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
1873     hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
1874     if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1875         hgram_opts |= QDIST_PR_NODECIMAL;
1876     }
1877     hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1878     cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
1879                 qdist_avg(&hst.occupancy) * 100, hgram);
1880     g_free(hgram);
1881 
1882     hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1883     hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1884     if (hgram_bins > 10) {
1885         hgram_bins = 10;
1886     } else {
1887         hgram_bins = 0;
1888         hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1889     }
1890     hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1891     cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
1892                 qdist_avg(&hst.chain), hgram);
1893     g_free(hgram);
1894 }
1895 
1896 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1897 {
1898     int i, target_code_size, max_target_code_size;
1899     int direct_jmp_count, direct_jmp2_count, cross_page;
1900     TranslationBlock *tb;
1901     struct qht_stats hst;
1902 
1903     tb_lock();
1904 
1905     target_code_size = 0;
1906     max_target_code_size = 0;
1907     cross_page = 0;
1908     direct_jmp_count = 0;
1909     direct_jmp2_count = 0;
1910     for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1911         tb = tcg_ctx.tb_ctx.tbs[i];
1912         target_code_size += tb->size;
1913         if (tb->size > max_target_code_size) {
1914             max_target_code_size = tb->size;
1915         }
1916         if (tb->page_addr[1] != -1) {
1917             cross_page++;
1918         }
1919         if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1920             direct_jmp_count++;
1921             if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1922                 direct_jmp2_count++;
1923             }
1924         }
1925     }
1926     /* XXX: avoid using doubles ? */
1927     cpu_fprintf(f, "Translation buffer state:\n");
1928     cpu_fprintf(f, "gen code size       %td/%zd\n",
1929                 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1930                 tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
1931     cpu_fprintf(f, "TB count            %d\n", tcg_ctx.tb_ctx.nb_tbs);
1932     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
1933             tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1934                     tcg_ctx.tb_ctx.nb_tbs : 0,
1935             max_target_code_size);
1936     cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
1937             tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1938                                      tcg_ctx.code_gen_buffer) /
1939                                      tcg_ctx.tb_ctx.nb_tbs : 0,
1940                 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1941                                              tcg_ctx.code_gen_buffer) /
1942                                              target_code_size : 0);
1943     cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1944             tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1945                                     tcg_ctx.tb_ctx.nb_tbs : 0);
1946     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
1947                 direct_jmp_count,
1948                 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1949                         tcg_ctx.tb_ctx.nb_tbs : 0,
1950                 direct_jmp2_count,
1951                 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1952                         tcg_ctx.tb_ctx.nb_tbs : 0);
1953 
1954     qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
1955     print_qht_statistics(f, cpu_fprintf, hst);
1956     qht_statistics_destroy(&hst);
1957 
1958     cpu_fprintf(f, "\nStatistics:\n");
1959     cpu_fprintf(f, "TB flush count      %u\n",
1960             atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
1961     cpu_fprintf(f, "TB invalidate count %d\n",
1962             tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1963     cpu_fprintf(f, "TLB flush count     %zu\n", tlb_flush_count());
1964     tcg_dump_info(f, cpu_fprintf);
1965 
1966     tb_unlock();
1967 }
1968 
1969 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1970 {
1971     tcg_dump_op_count(f, cpu_fprintf);
1972 }
1973 
1974 #else /* CONFIG_USER_ONLY */
1975 
1976 void cpu_interrupt(CPUState *cpu, int mask)
1977 {
1978     g_assert(qemu_mutex_iothread_locked());
1979     cpu->interrupt_request |= mask;
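    /* Setting the high half makes icount_decr.u32 negative; the check at
     * the start of every generated TB notices this and drops back to the
     * main loop, where the pending interrupt is handled.
     */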
1980     cpu->icount_decr.u16.high = -1;
1981 }
1982 
1983 /*
1984  * Walks guest process memory "regions" one by one
1985  * and calls callback function 'fn' for each region.
1986  */
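/* Adjacent pages with identical protection flags are coalesced into a single
 * region before 'fn' is called; data.start == -1u marks "no region open".
 */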
1987 struct walk_memory_regions_data {
1988     walk_memory_regions_fn fn;
1989     void *priv;
1990     target_ulong start;
1991     int prot;
1992 };
1993 
1994 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1995                                    target_ulong end, int new_prot)
1996 {
1997     if (data->start != -1u) {
1998         int rc = data->fn(data->priv, data->start, end, data->prot);
1999         if (rc != 0) {
2000             return rc;
2001         }
2002     }
2003 
2004     data->start = (new_prot ? end : -1u);
2005     data->prot = new_prot;
2006 
2007     return 0;
2008 }
2009 
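/* Recursively walk one node of the radix page table: at level 0 the node is
 * an array of PageDesc entries, otherwise an array of V_L2_SIZE pointers to
 * the next level down.
 */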
2010 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2011                                  target_ulong base, int level, void **lp)
2012 {
2013     target_ulong pa;
2014     int i, rc;
2015 
2016     if (*lp == NULL) {
2017         return walk_memory_regions_end(data, base, 0);
2018     }
2019 
2020     if (level == 0) {
2021         PageDesc *pd = *lp;
2022 
2023         for (i = 0; i < V_L2_SIZE; ++i) {
2024             int prot = pd[i].flags;
2025 
2026             pa = base | (i << TARGET_PAGE_BITS);
2027             if (prot != data->prot) {
2028                 rc = walk_memory_regions_end(data, pa, prot);
2029                 if (rc != 0) {
2030                     return rc;
2031                 }
2032             }
2033         }
2034     } else {
2035         void **pp = *lp;
2036 
2037         for (i = 0; i < V_L2_SIZE; ++i) {
2038             pa = base | ((target_ulong)i <<
2039                 (TARGET_PAGE_BITS + V_L2_BITS * level));
2040             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2041             if (rc != 0) {
2042                 return rc;
2043             }
2044         }
2045     }
2046 
2047     return 0;
2048 }
2049 
2050 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2051 {
2052     struct walk_memory_regions_data data;
2053     uintptr_t i, l1_sz = v_l1_size;
2054 
2055     data.fn = fn;
2056     data.priv = priv;
2057     data.start = -1u;
2058     data.prot = 0;
2059 
2060     for (i = 0; i < l1_sz; i++) {
2061         target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2062         int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2063         if (rc != 0) {
2064             return rc;
2065         }
2066     }
2067 
2068     return walk_memory_regions_end(&data, 0, 0);
2069 }
2070 
2071 static int dump_region(void *priv, target_ulong start,
2072     target_ulong end, unsigned long prot)
2073 {
2074     FILE *f = (FILE *)priv;
2075 
2076     (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2077         " "TARGET_FMT_lx" %c%c%c\n",
2078         start, end, end - start,
2079         ((prot & PAGE_READ) ? 'r' : '-'),
2080         ((prot & PAGE_WRITE) ? 'w' : '-'),
2081         ((prot & PAGE_EXEC) ? 'x' : '-'));
2082 
2083     return 0;
2084 }
2085 
2086 /* dump memory mappings */
2087 void page_dump(FILE *f)
2088 {
2089     const int length = sizeof(target_ulong) * 2;
2090     (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2091             length, "start", length, "end", length, "size", "prot");
2092     walk_memory_regions(f, dump_region);
2093 }
2094 
2095 int page_get_flags(target_ulong address)
2096 {
2097     PageDesc *p;
2098 
2099     p = page_find(address >> TARGET_PAGE_BITS);
2100     if (!p) {
2101         return 0;
2102     }
2103     return p->flags;
2104 }
2105 
2106 /* Modify the flags of a page and invalidate the code if necessary.
2107    The flag PAGE_WRITE_ORG is set automatically depending
2108    on PAGE_WRITE.  The mmap_lock should already be held.  */
2109 void page_set_flags(target_ulong start, target_ulong end, int flags)
2110 {
2111     target_ulong addr, len;
2112 
2113     /* This function should never be called with addresses outside the
2114        guest address space.  If this assert fires, it probably indicates
2115        a missing call to h2g_valid.  */
2116 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2117     assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2118 #endif
2119     assert(start < end);
2120     assert_memory_lock();
2121 
2122     start = start & TARGET_PAGE_MASK;
2123     end = TARGET_PAGE_ALIGN(end);
2124 
2125     if (flags & PAGE_WRITE) {
2126         flags |= PAGE_WRITE_ORG;
2127     }
2128 
2129     for (addr = start, len = end - start;
2130          len != 0;
2131          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2132         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2133 
2134         /* If the page was not writable but is now being made writable,
2135            invalidate any translated code it contains.  */
2136         if (!(p->flags & PAGE_WRITE) &&
2137             (flags & PAGE_WRITE) &&
2138             p->first_tb) {
2139             tb_invalidate_phys_page(addr, 0);
2140         }
2141         p->flags = flags;
2142     }
2143 }
2144 
2145 int page_check_range(target_ulong start, target_ulong len, int flags)
2146 {
2147     PageDesc *p;
2148     target_ulong end;
2149     target_ulong addr;
2150 
2151     /* This function should never be called with addresses outside the
2152        guest address space.  If this assert fires, it probably indicates
2153        a missing call to h2g_valid.  */
2154 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2155     assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2156 #endif
2157 
2158     if (len == 0) {
2159         return 0;
2160     }
2161     if (start + len - 1 < start) {
2162         /* We've wrapped around.  */
2163         return -1;
2164     }
2165 
2166     /* must do this before we lose bits in the next step */
2167     end = TARGET_PAGE_ALIGN(start + len);
2168     start = start & TARGET_PAGE_MASK;
2169 
2170     for (addr = start, len = end - start;
2171          len != 0;
2172          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2173         p = page_find(addr >> TARGET_PAGE_BITS);
2174         if (!p) {
2175             return -1;
2176         }
2177         if (!(p->flags & PAGE_VALID)) {
2178             return -1;
2179         }
2180 
2181         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2182             return -1;
2183         }
2184         if (flags & PAGE_WRITE) {
2185             if (!(p->flags & PAGE_WRITE_ORG)) {
2186                 return -1;
2187             }
2188             /* unprotect the page if it was made read-only because it
2189                contains translated code */
2190             if (!(p->flags & PAGE_WRITE)) {
2191                 if (!page_unprotect(addr, 0)) {
2192                     return -1;
2193                 }
2194             }
2195         }
2196     }
2197     return 0;
2198 }
2199 
2200 /* called from signal handler: invalidate the code and unprotect the
2201  * page. Return 0 if the fault was not handled, 1 if it was handled,
2202  * and 2 if it was handled but the caller must cause the TB to be
2203  * immediately exited. (We can only return 2 if the 'pc' argument is
2204  * non-zero.)
2205  */
2206 int page_unprotect(target_ulong address, uintptr_t pc)
2207 {
2208     unsigned int prot;
2209     bool current_tb_invalidated;
2210     PageDesc *p;
2211     target_ulong host_start, host_end, addr;
2212 
2213     /* Technically this isn't safe inside a signal handler.  However we
2214        know this only ever happens in a synchronous SEGV handler, so in
2215        practice it seems to be ok.  */
2216     mmap_lock();
2217 
2218     p = page_find(address >> TARGET_PAGE_BITS);
2219     if (!p) {
2220         mmap_unlock();
2221         return 0;
2222     }
2223 
2224     /* if the page was originally writable, then we change its
2225        protection back to writable */
2226     if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2227         host_start = address & qemu_host_page_mask;
2228         host_end = host_start + qemu_host_page_size;
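        /* mprotect() below works at host-page granularity, which may be
         * larger than TARGET_PAGE_SIZE, so every target page sharing this
         * host page must be made writable together and have its translated
         * code invalidated.
         */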
2229 
2230         prot = 0;
2231         current_tb_invalidated = false;
2232         for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2233             p = page_find(addr >> TARGET_PAGE_BITS);
2234             p->flags |= PAGE_WRITE;
2235             prot |= p->flags;
2236 
2237             /* and since the content will be modified, we must invalidate
2238                the corresponding translated code. */
2239             current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2240 #ifdef CONFIG_USER_ONLY
2241             if (DEBUG_TB_CHECK_GATE) {
2242                 tb_invalidate_check(addr);
2243             }
2244 #endif
2245         }
2246         mprotect((void *)g2h(host_start), qemu_host_page_size,
2247                  prot & PAGE_BITS);
2248 
2249         mmap_unlock();
2250         /* If the current TB was invalidated, return to the main loop */
2251         return current_tb_invalidated ? 2 : 1;
2252     }
2253     mmap_unlock();
2254     return 0;
2255 }
2256 #endif /* CONFIG_USER_ONLY */
2257 
2258 /* This is a wrapper for common code that cannot use CONFIG_SOFTMMU */
2259 void tcg_flush_softmmu_tlb(CPUState *cs)
2260 {
2261 #ifdef CONFIG_SOFTMMU
2262     tlb_flush(cs);
2263 #endif
2264 }
2265