xref: /qemu/accel/tcg/translate-all.c (revision 7606c99a)
1 /*
2  *  Host code generation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #ifdef _WIN32
20 #include <windows.h>
21 #endif
22 #include "qemu/osdep.h"
23 
24 
25 #include "qemu-common.h"
26 #define NO_CPU_IO_DEFS
27 #include "cpu.h"
28 #include "trace.h"
29 #include "disas/disas.h"
30 #include "exec/exec-all.h"
31 #include "tcg.h"
32 #if defined(CONFIG_USER_ONLY)
33 #include "qemu.h"
34 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
35 #include <sys/param.h>
36 #if __FreeBSD_version >= 700104
37 #define HAVE_KINFO_GETVMMAP
38 #define sigqueue sigqueue_freebsd  /* avoid redefinition */
39 #include <sys/proc.h>
40 #include <machine/profile.h>
41 #define _KERNEL
42 #include <sys/user.h>
43 #undef _KERNEL
44 #undef sigqueue
45 #include <libutil.h>
46 #endif
47 #endif
48 #else
49 #include "exec/ram_addr.h"
50 #endif
51 
52 #include "exec/cputlb.h"
53 #include "exec/tb-hash.h"
54 #include "translate-all.h"
55 #include "qemu/bitmap.h"
56 #include "qemu/error-report.h"
57 #include "qemu/timer.h"
58 #include "qemu/main-loop.h"
59 #include "exec/log.h"
60 #include "sysemu/cpus.h"
61 
62 /* #define DEBUG_TB_INVALIDATE */
63 /* #define DEBUG_TB_FLUSH */
64 /* make various TB consistency checks */
65 /* #define DEBUG_TB_CHECK */
66 
67 #ifdef DEBUG_TB_INVALIDATE
68 #define DEBUG_TB_INVALIDATE_GATE 1
69 #else
70 #define DEBUG_TB_INVALIDATE_GATE 0
71 #endif
72 
73 #ifdef DEBUG_TB_FLUSH
74 #define DEBUG_TB_FLUSH_GATE 1
75 #else
76 #define DEBUG_TB_FLUSH_GATE 0
77 #endif
78 
79 #if !defined(CONFIG_USER_ONLY)
80 /* TB consistency checks only implemented for usermode emulation.  */
81 #undef DEBUG_TB_CHECK
82 #endif
83 
84 #ifdef DEBUG_TB_CHECK
85 #define DEBUG_TB_CHECK_GATE 1
86 #else
87 #define DEBUG_TB_CHECK_GATE 0
88 #endif
89 
90 /* Access to the various translations structures need to be serialised via locks
91  * for consistency.
92  * In user-mode emulation access to the memory related structures are protected
93  * with mmap_lock.
94  * In !user-mode we use per-page locks.
95  */
96 #ifdef CONFIG_SOFTMMU
97 #define assert_memory_lock()
98 #else
99 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
100 #endif
101 
102 #define SMC_BITMAP_USE_THRESHOLD 10
103 
104 typedef struct PageDesc {
105     /* list of TBs intersecting this ram page */
106     uintptr_t first_tb;
107 #ifdef CONFIG_SOFTMMU
108     /* in order to optimize self modifying code, we count the number
109        of lookups we do to a given page to use a bitmap */
110     unsigned long *code_bitmap;
111     unsigned int code_write_count;
112 #else
113     unsigned long flags;
114 #endif
115 #ifndef CONFIG_USER_ONLY
116     QemuSpin lock;
117 #endif
118 } PageDesc;
119 
120 /**
121  * struct page_entry - page descriptor entry
122  * @pd:     pointer to the &struct PageDesc of the page this entry represents
123  * @index:  page index of the page
124  * @locked: whether the page is locked
125  *
126  * This struct helps us keep track of the locked state of a page, without
127  * bloating &struct PageDesc.
128  *
129  * A page lock protects accesses to all fields of &struct PageDesc.
130  *
131  * See also: &struct page_collection.
132  */
133 struct page_entry {
134     PageDesc *pd;
135     tb_page_addr_t index;
136     bool locked;
137 };
138 
139 /**
140  * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
141  * @tree:   Binary search tree (BST) of the pages, with key == page index
142  * @max:    Pointer to the page in @tree with the highest page index
143  *
144  * To avoid deadlock we lock pages in ascending order of page index.
145  * When operating on a set of pages, we need to keep track of them so that
146  * we can lock them in order and also unlock them later. For this we collect
147  * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
148  * @tree implementation we use does not provide an O(1) operation to obtain the
149  * highest-ranked element, we use @max to keep track of the inserted page
150  * with the highest index. This is valuable because if a page is not in
151  * the tree and its index is higher than @max's, then we can lock it
152  * without breaking the locking order rule.
153  *
154  * Note on naming: 'struct page_set' would be shorter, but we already have a few
155  * page_set_*() helpers, so page_collection is used instead to avoid confusion.
156  *
157  * See also: page_collection_lock().
158  */
159 struct page_collection {
160     GTree *tree;
161     struct page_entry *max;
162 };
163 
164 /* list iterators for lists of tagged pointers in TranslationBlock */
165 #define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
166     for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
167          tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
168              tb = (TranslationBlock *)((uintptr_t)tb & ~1))
169 
170 #define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
171     TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
172 
173 #define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
174     TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
175 
176 /* In system mode we want L1_MAP to be based on ram offsets,
177    while in user mode we want it to be based on virtual addresses.  */
178 #if !defined(CONFIG_USER_ONLY)
179 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
180 # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
181 #else
182 # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
183 #endif
184 #else
185 # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
186 #endif
187 
188 /* Size of the L2 (and L3, etc) page tables.  */
189 #define V_L2_BITS 10
190 #define V_L2_SIZE (1 << V_L2_BITS)
191 
192 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
193 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
194                   sizeof_field(TranslationBlock, trace_vcpu_dstate)
195                   * BITS_PER_BYTE);
196 
197 /*
198  * L1 Mapping properties
199  */
200 static int v_l1_size;
201 static int v_l1_shift;
202 static int v_l2_levels;
203 
204 /* The bottom level has pointers to PageDesc, and is indexed by
205  * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
206  */
207 #define V_L1_MIN_BITS 4
208 #define V_L1_MAX_BITS (V_L2_BITS + 3)
209 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
210 
211 static void *l1_map[V_L1_MAX_SIZE];
212 
213 /* code generation context */
214 TCGContext tcg_init_ctx;
215 __thread TCGContext *tcg_ctx;
216 TBContext tb_ctx;
217 bool parallel_cpus;
218 
219 static void page_table_config_init(void)
220 {
221     uint32_t v_l1_bits;
222 
223     assert(TARGET_PAGE_BITS);
224     /* The bits remaining after N lower levels of page tables.  */
225     v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
226     if (v_l1_bits < V_L1_MIN_BITS) {
227         v_l1_bits += V_L2_BITS;
228     }
229 
230     v_l1_size = 1 << v_l1_bits;
231     v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
232     v_l2_levels = v_l1_shift / V_L2_BITS - 1;
233 
234     assert(v_l1_bits <= V_L1_MAX_BITS);
235     assert(v_l1_shift % V_L2_BITS == 0);
236     assert(v_l2_levels >= 0);
237 }
238 
239 void cpu_gen_init(void)
240 {
241     tcg_context_init(&tcg_init_ctx);
242 }
243 
244 /* Encode VAL as a signed leb128 sequence at P.
245    Return P incremented past the encoded value.  */
246 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
247 {
248     int more, byte;
249 
250     do {
251         byte = val & 0x7f;
252         val >>= 7;
253         more = !((val == 0 && (byte & 0x40) == 0)
254                  || (val == -1 && (byte & 0x40) != 0));
255         if (more) {
256             byte |= 0x80;
257         }
258         *p++ = byte;
259     } while (more);
260 
261     return p;
262 }
263 
264 /* Decode a signed leb128 sequence at *PP; increment *PP past the
265    decoded value.  Return the decoded value.  */
266 static target_long decode_sleb128(uint8_t **pp)
267 {
268     uint8_t *p = *pp;
269     target_long val = 0;
270     int byte, shift = 0;
271 
272     do {
273         byte = *p++;
274         val |= (target_ulong)(byte & 0x7f) << shift;
275         shift += 7;
276     } while (byte & 0x80);
277     if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
278         val |= -(target_ulong)1 << shift;
279     }
280 
281     *pp = p;
282     return val;
283 }
284 
285 /* Encode the data collected about the instructions while compiling TB.
286    Place the data at BLOCK, and return the number of bytes consumed.
287 
288    The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
289    which come from the target's insn_start data, followed by a uintptr_t
290    which comes from the host pc of the end of the code implementing the insn.
291 
292    Each line of the table is encoded as sleb128 deltas from the previous
293    line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
294    That is, the first column is seeded with the guest pc, the last column
295    with the host pc, and the middle columns with zeros.  */
296 
297 static int encode_search(TranslationBlock *tb, uint8_t *block)
298 {
299     uint8_t *highwater = tcg_ctx->code_gen_highwater;
300     uint8_t *p = block;
301     int i, j, n;
302 
303     for (i = 0, n = tb->icount; i < n; ++i) {
304         target_ulong prev;
305 
306         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
307             if (i == 0) {
308                 prev = (j == 0 ? tb->pc : 0);
309             } else {
310                 prev = tcg_ctx->gen_insn_data[i - 1][j];
311             }
312             p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
313         }
314         prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
315         p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
316 
317         /* Test for (pending) buffer overflow.  The assumption is that any
318            one row beginning below the high water mark cannot overrun
319            the buffer completely.  Thus we can test for overflow after
320            encoding a row without having to check during encoding.  */
321         if (unlikely(p > highwater)) {
322             return -1;
323         }
324     }
325 
326     return p - block;
327 }
328 
329 /* The cpu state corresponding to 'searched_pc' is restored.
330  * When reset_icount is true, current TB will be interrupted and
331  * icount should be recalculated.
332  */
333 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
334                                      uintptr_t searched_pc, bool reset_icount)
335 {
336     target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
337     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
338     CPUArchState *env = cpu->env_ptr;
339     uint8_t *p = tb->tc.ptr + tb->tc.size;
340     int i, j, num_insns = tb->icount;
341 #ifdef CONFIG_PROFILER
342     TCGProfile *prof = &tcg_ctx->prof;
343     int64_t ti = profile_getclock();
344 #endif
345 
346     searched_pc -= GETPC_ADJ;
347 
348     if (searched_pc < host_pc) {
349         return -1;
350     }
351 
352     /* Reconstruct the stored insn data while looking for the point at
353        which the end of the insn exceeds the searched_pc.  */
354     for (i = 0; i < num_insns; ++i) {
355         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
356             data[j] += decode_sleb128(&p);
357         }
358         host_pc += decode_sleb128(&p);
359         if (host_pc > searched_pc) {
360             goto found;
361         }
362     }
363     return -1;
364 
365  found:
366     if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
367         assert(use_icount);
368         /* Reset the cycle counter to the start of the block
369            and shift if to the number of actually executed instructions */
370         cpu->icount_decr.u16.low += num_insns - i;
371     }
372     restore_state_to_opc(env, tb, data);
373 
374 #ifdef CONFIG_PROFILER
375     atomic_set(&prof->restore_time,
376                 prof->restore_time + profile_getclock() - ti);
377     atomic_set(&prof->restore_count, prof->restore_count + 1);
378 #endif
379     return 0;
380 }
381 
382 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
383 {
384     TranslationBlock *tb;
385     bool r = false;
386     uintptr_t check_offset;
387 
388     /* The host_pc has to be in the region of current code buffer. If
389      * it is not we will not be able to resolve it here. The two cases
390      * where host_pc will not be correct are:
391      *
392      *  - fault during translation (instruction fetch)
393      *  - fault from helper (not using GETPC() macro)
394      *
395      * Either way we need return early as we can't resolve it here.
396      *
397      * We are using unsigned arithmetic so if host_pc <
398      * tcg_init_ctx.code_gen_buffer check_offset will wrap to way
399      * above the code_gen_buffer_size
400      */
401     check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
402 
403     if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
404         tb = tcg_tb_lookup(host_pc);
405         if (tb) {
406             cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
407             if (tb_cflags(tb) & CF_NOCACHE) {
408                 /* one-shot translation, invalidate it immediately */
409                 tb_phys_invalidate(tb, -1);
410                 tcg_tb_remove(tb);
411             }
412             r = true;
413         }
414     }
415 
416     return r;
417 }
418 
419 static void page_init(void)
420 {
421     page_size_init();
422     page_table_config_init();
423 
424 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
425     {
426 #ifdef HAVE_KINFO_GETVMMAP
427         struct kinfo_vmentry *freep;
428         int i, cnt;
429 
430         freep = kinfo_getvmmap(getpid(), &cnt);
431         if (freep) {
432             mmap_lock();
433             for (i = 0; i < cnt; i++) {
434                 unsigned long startaddr, endaddr;
435 
436                 startaddr = freep[i].kve_start;
437                 endaddr = freep[i].kve_end;
438                 if (h2g_valid(startaddr)) {
439                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
440 
441                     if (h2g_valid(endaddr)) {
442                         endaddr = h2g(endaddr);
443                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
444                     } else {
445 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
446                         endaddr = ~0ul;
447                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
448 #endif
449                     }
450                 }
451             }
452             free(freep);
453             mmap_unlock();
454         }
455 #else
456         FILE *f;
457 
458         last_brk = (unsigned long)sbrk(0);
459 
460         f = fopen("/compat/linux/proc/self/maps", "r");
461         if (f) {
462             mmap_lock();
463 
464             do {
465                 unsigned long startaddr, endaddr;
466                 int n;
467 
468                 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
469 
470                 if (n == 2 && h2g_valid(startaddr)) {
471                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
472 
473                     if (h2g_valid(endaddr)) {
474                         endaddr = h2g(endaddr);
475                     } else {
476                         endaddr = ~0ul;
477                     }
478                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
479                 }
480             } while (!feof(f));
481 
482             fclose(f);
483             mmap_unlock();
484         }
485 #endif
486     }
487 #endif
488 }
489 
490 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
491 {
492     PageDesc *pd;
493     void **lp;
494     int i;
495 
496     /* Level 1.  Always allocated.  */
497     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
498 
499     /* Level 2..N-1.  */
500     for (i = v_l2_levels; i > 0; i--) {
501         void **p = atomic_rcu_read(lp);
502 
503         if (p == NULL) {
504             void *existing;
505 
506             if (!alloc) {
507                 return NULL;
508             }
509             p = g_new0(void *, V_L2_SIZE);
510             existing = atomic_cmpxchg(lp, NULL, p);
511             if (unlikely(existing)) {
512                 g_free(p);
513                 p = existing;
514             }
515         }
516 
517         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
518     }
519 
520     pd = atomic_rcu_read(lp);
521     if (pd == NULL) {
522         void *existing;
523 
524         if (!alloc) {
525             return NULL;
526         }
527         pd = g_new0(PageDesc, V_L2_SIZE);
528 #ifndef CONFIG_USER_ONLY
529         {
530             int i;
531 
532             for (i = 0; i < V_L2_SIZE; i++) {
533                 qemu_spin_init(&pd[i].lock);
534             }
535         }
536 #endif
537         existing = atomic_cmpxchg(lp, NULL, pd);
538         if (unlikely(existing)) {
539             g_free(pd);
540             pd = existing;
541         }
542     }
543 
544     return pd + (index & (V_L2_SIZE - 1));
545 }
546 
547 static inline PageDesc *page_find(tb_page_addr_t index)
548 {
549     return page_find_alloc(index, 0);
550 }
551 
552 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
553                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
554 
555 /* In user-mode page locks aren't used; mmap_lock is enough */
556 #ifdef CONFIG_USER_ONLY
557 
558 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
559 
560 static inline void page_lock(PageDesc *pd)
561 { }
562 
563 static inline void page_unlock(PageDesc *pd)
564 { }
565 
566 static inline void page_lock_tb(const TranslationBlock *tb)
567 { }
568 
569 static inline void page_unlock_tb(const TranslationBlock *tb)
570 { }
571 
572 struct page_collection *
573 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
574 {
575     return NULL;
576 }
577 
578 void page_collection_unlock(struct page_collection *set)
579 { }
580 #else /* !CONFIG_USER_ONLY */
581 
582 #ifdef CONFIG_DEBUG_TCG
583 
584 static __thread GHashTable *ht_pages_locked_debug;
585 
586 static void ht_pages_locked_debug_init(void)
587 {
588     if (ht_pages_locked_debug) {
589         return;
590     }
591     ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
592 }
593 
594 static bool page_is_locked(const PageDesc *pd)
595 {
596     PageDesc *found;
597 
598     ht_pages_locked_debug_init();
599     found = g_hash_table_lookup(ht_pages_locked_debug, pd);
600     return !!found;
601 }
602 
603 static void page_lock__debug(PageDesc *pd)
604 {
605     ht_pages_locked_debug_init();
606     g_assert(!page_is_locked(pd));
607     g_hash_table_insert(ht_pages_locked_debug, pd, pd);
608 }
609 
610 static void page_unlock__debug(const PageDesc *pd)
611 {
612     bool removed;
613 
614     ht_pages_locked_debug_init();
615     g_assert(page_is_locked(pd));
616     removed = g_hash_table_remove(ht_pages_locked_debug, pd);
617     g_assert(removed);
618 }
619 
620 static void
621 do_assert_page_locked(const PageDesc *pd, const char *file, int line)
622 {
623     if (unlikely(!page_is_locked(pd))) {
624         error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
625                      pd, file, line);
626         abort();
627     }
628 }
629 
630 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
631 
632 void assert_no_pages_locked(void)
633 {
634     ht_pages_locked_debug_init();
635     g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
636 }
637 
638 #else /* !CONFIG_DEBUG_TCG */
639 
640 #define assert_page_locked(pd)
641 
642 static inline void page_lock__debug(const PageDesc *pd)
643 {
644 }
645 
646 static inline void page_unlock__debug(const PageDesc *pd)
647 {
648 }
649 
650 #endif /* CONFIG_DEBUG_TCG */
651 
652 static inline void page_lock(PageDesc *pd)
653 {
654     page_lock__debug(pd);
655     qemu_spin_lock(&pd->lock);
656 }
657 
658 static inline void page_unlock(PageDesc *pd)
659 {
660     qemu_spin_unlock(&pd->lock);
661     page_unlock__debug(pd);
662 }
663 
664 /* lock the page(s) of a TB in the correct acquisition order */
665 static inline void page_lock_tb(const TranslationBlock *tb)
666 {
667     page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
668 }
669 
670 static inline void page_unlock_tb(const TranslationBlock *tb)
671 {
672     PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
673 
674     page_unlock(p1);
675     if (unlikely(tb->page_addr[1] != -1)) {
676         PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
677 
678         if (p2 != p1) {
679             page_unlock(p2);
680         }
681     }
682 }
683 
684 static inline struct page_entry *
685 page_entry_new(PageDesc *pd, tb_page_addr_t index)
686 {
687     struct page_entry *pe = g_malloc(sizeof(*pe));
688 
689     pe->index = index;
690     pe->pd = pd;
691     pe->locked = false;
692     return pe;
693 }
694 
695 static void page_entry_destroy(gpointer p)
696 {
697     struct page_entry *pe = p;
698 
699     g_assert(pe->locked);
700     page_unlock(pe->pd);
701     g_free(pe);
702 }
703 
704 /* returns false on success */
705 static bool page_entry_trylock(struct page_entry *pe)
706 {
707     bool busy;
708 
709     busy = qemu_spin_trylock(&pe->pd->lock);
710     if (!busy) {
711         g_assert(!pe->locked);
712         pe->locked = true;
713         page_lock__debug(pe->pd);
714     }
715     return busy;
716 }
717 
718 static void do_page_entry_lock(struct page_entry *pe)
719 {
720     page_lock(pe->pd);
721     g_assert(!pe->locked);
722     pe->locked = true;
723 }
724 
725 static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
726 {
727     struct page_entry *pe = value;
728 
729     do_page_entry_lock(pe);
730     return FALSE;
731 }
732 
733 static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
734 {
735     struct page_entry *pe = value;
736 
737     if (pe->locked) {
738         pe->locked = false;
739         page_unlock(pe->pd);
740     }
741     return FALSE;
742 }
743 
744 /*
745  * Trylock a page, and if successful, add the page to a collection.
746  * Returns true ("busy") if the page could not be locked; false otherwise.
747  */
748 static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
749 {
750     tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
751     struct page_entry *pe;
752     PageDesc *pd;
753 
754     pe = g_tree_lookup(set->tree, &index);
755     if (pe) {
756         return false;
757     }
758 
759     pd = page_find(index);
760     if (pd == NULL) {
761         return false;
762     }
763 
764     pe = page_entry_new(pd, index);
765     g_tree_insert(set->tree, &pe->index, pe);
766 
767     /*
768      * If this is either (1) the first insertion or (2) a page whose index
769      * is higher than any other so far, just lock the page and move on.
770      */
771     if (set->max == NULL || pe->index > set->max->index) {
772         set->max = pe;
773         do_page_entry_lock(pe);
774         return false;
775     }
776     /*
777      * Try to acquire out-of-order lock; if busy, return busy so that we acquire
778      * locks in order.
779      */
780     return page_entry_trylock(pe);
781 }
782 
783 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
784 {
785     tb_page_addr_t a = *(const tb_page_addr_t *)ap;
786     tb_page_addr_t b = *(const tb_page_addr_t *)bp;
787 
788     if (a == b) {
789         return 0;
790     } else if (a < b) {
791         return -1;
792     }
793     return 1;
794 }
795 
796 /*
797  * Lock a range of pages ([@start,@end[) as well as the pages of all
798  * intersecting TBs.
799  * Locking order: acquire locks in ascending order of page index.
800  */
801 struct page_collection *
802 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
803 {
804     struct page_collection *set = g_malloc(sizeof(*set));
805     tb_page_addr_t index;
806     PageDesc *pd;
807 
808     start >>= TARGET_PAGE_BITS;
809     end   >>= TARGET_PAGE_BITS;
810     g_assert(start <= end);
811 
812     set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
813                                 page_entry_destroy);
814     set->max = NULL;
815     assert_no_pages_locked();
816 
817  retry:
818     g_tree_foreach(set->tree, page_entry_lock, NULL);
819 
820     for (index = start; index <= end; index++) {
821         TranslationBlock *tb;
822         int n;
823 
824         pd = page_find(index);
825         if (pd == NULL) {
826             continue;
827         }
828         if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
829             g_tree_foreach(set->tree, page_entry_unlock, NULL);
830             goto retry;
831         }
832         assert_page_locked(pd);
833         PAGE_FOR_EACH_TB(pd, tb, n) {
834             if (page_trylock_add(set, tb->page_addr[0]) ||
835                 (tb->page_addr[1] != -1 &&
836                  page_trylock_add(set, tb->page_addr[1]))) {
837                 /* drop all locks, and reacquire in order */
838                 g_tree_foreach(set->tree, page_entry_unlock, NULL);
839                 goto retry;
840             }
841         }
842     }
843     return set;
844 }
845 
846 void page_collection_unlock(struct page_collection *set)
847 {
848     /* entries are unlocked and freed via page_entry_destroy */
849     g_tree_destroy(set->tree);
850     g_free(set);
851 }
852 
853 #endif /* !CONFIG_USER_ONLY */
854 
855 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
856                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
857 {
858     PageDesc *p1, *p2;
859     tb_page_addr_t page1;
860     tb_page_addr_t page2;
861 
862     assert_memory_lock();
863     g_assert(phys1 != -1);
864 
865     page1 = phys1 >> TARGET_PAGE_BITS;
866     page2 = phys2 >> TARGET_PAGE_BITS;
867 
868     p1 = page_find_alloc(page1, alloc);
869     if (ret_p1) {
870         *ret_p1 = p1;
871     }
872     if (likely(phys2 == -1)) {
873         page_lock(p1);
874         return;
875     } else if (page1 == page2) {
876         page_lock(p1);
877         if (ret_p2) {
878             *ret_p2 = p1;
879         }
880         return;
881     }
882     p2 = page_find_alloc(page2, alloc);
883     if (ret_p2) {
884         *ret_p2 = p2;
885     }
886     if (page1 < page2) {
887         page_lock(p1);
888         page_lock(p2);
889     } else {
890         page_lock(p2);
891         page_lock(p1);
892     }
893 }
894 
895 #if defined(CONFIG_USER_ONLY)
896 /* Currently it is not recommended to allocate big chunks of data in
897    user mode. It will change when a dedicated libc will be used.  */
898 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
899    region in which the guest needs to run.  Revisit this.  */
900 #define USE_STATIC_CODE_GEN_BUFFER
901 #endif
902 
903 /* Minimum size of the code gen buffer.  This number is randomly chosen,
904    but not so small that we can't have a fair number of TB's live.  */
905 #define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
906 
907 /* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
908    indicated, this is constrained by the range of direct branches on the
909    host cpu, as used by the TCG implementation of goto_tb.  */
910 #if defined(__x86_64__)
911 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
912 #elif defined(__sparc__)
913 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
914 #elif defined(__powerpc64__)
915 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
916 #elif defined(__powerpc__)
917 # define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
918 #elif defined(__aarch64__)
919 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
920 #elif defined(__s390x__)
921   /* We have a +- 4GB range on the branches; leave some slop.  */
922 # define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
923 #elif defined(__mips__)
924   /* We have a 256MB branch region, but leave room to make sure the
925      main executable is also within that region.  */
926 # define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
927 #else
928 # define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
929 #endif
930 
931 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
932 
933 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
934   (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
935    ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
936 
937 static inline size_t size_code_gen_buffer(size_t tb_size)
938 {
939     /* Size the buffer.  */
940     if (tb_size == 0) {
941 #ifdef USE_STATIC_CODE_GEN_BUFFER
942         tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
943 #else
944         /* ??? Needs adjustments.  */
945         /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
946            static buffer, we could size this on RESERVED_VA, on the text
947            segment size of the executable, or continue to use the default.  */
948         tb_size = (unsigned long)(ram_size / 4);
949 #endif
950     }
951     if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
952         tb_size = MIN_CODE_GEN_BUFFER_SIZE;
953     }
954     if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
955         tb_size = MAX_CODE_GEN_BUFFER_SIZE;
956     }
957     return tb_size;
958 }
959 
960 #ifdef __mips__
961 /* In order to use J and JAL within the code_gen_buffer, we require
962    that the buffer not cross a 256MB boundary.  */
963 static inline bool cross_256mb(void *addr, size_t size)
964 {
965     return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
966 }
967 
968 /* We weren't able to allocate a buffer without crossing that boundary,
969    so make do with the larger portion of the buffer that doesn't cross.
970    Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
971 static inline void *split_cross_256mb(void *buf1, size_t size1)
972 {
973     void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
974     size_t size2 = buf1 + size1 - buf2;
975 
976     size1 = buf2 - buf1;
977     if (size1 < size2) {
978         size1 = size2;
979         buf1 = buf2;
980     }
981 
982     tcg_ctx->code_gen_buffer_size = size1;
983     return buf1;
984 }
985 #endif
986 
987 #ifdef USE_STATIC_CODE_GEN_BUFFER
988 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
989     __attribute__((aligned(CODE_GEN_ALIGN)));
990 
991 static inline void *alloc_code_gen_buffer(void)
992 {
993     void *buf = static_code_gen_buffer;
994     void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
995     size_t size;
996 
997     /* page-align the beginning and end of the buffer */
998     buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
999     end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
1000 
1001     size = end - buf;
1002 
1003     /* Honor a command-line option limiting the size of the buffer.  */
1004     if (size > tcg_ctx->code_gen_buffer_size) {
1005         size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
1006                                qemu_real_host_page_size);
1007     }
1008     tcg_ctx->code_gen_buffer_size = size;
1009 
1010 #ifdef __mips__
1011     if (cross_256mb(buf, size)) {
1012         buf = split_cross_256mb(buf, size);
1013         size = tcg_ctx->code_gen_buffer_size;
1014     }
1015 #endif
1016 
1017     if (qemu_mprotect_rwx(buf, size)) {
1018         abort();
1019     }
1020     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1021 
1022     return buf;
1023 }
1024 #elif defined(_WIN32)
1025 static inline void *alloc_code_gen_buffer(void)
1026 {
1027     size_t size = tcg_ctx->code_gen_buffer_size;
1028     return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
1029                         PAGE_EXECUTE_READWRITE);
1030 }
1031 #else
1032 static inline void *alloc_code_gen_buffer(void)
1033 {
1034     int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
1035     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
1036     uintptr_t start = 0;
1037     size_t size = tcg_ctx->code_gen_buffer_size;
1038     void *buf;
1039 
1040     /* Constrain the position of the buffer based on the host cpu.
1041        Note that these addresses are chosen in concert with the
1042        addresses assigned in the relevant linker script file.  */
1043 # if defined(__PIE__) || defined(__PIC__)
1044     /* Don't bother setting a preferred location if we're building
1045        a position-independent executable.  We're more likely to get
1046        an address near the main executable if we let the kernel
1047        choose the address.  */
1048 # elif defined(__x86_64__) && defined(MAP_32BIT)
1049     /* Force the memory down into low memory with the executable.
1050        Leave the choice of exact location with the kernel.  */
1051     flags |= MAP_32BIT;
1052     /* Cannot expect to map more than 800MB in low memory.  */
1053     if (size > 800u * 1024 * 1024) {
1054         tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
1055     }
1056 # elif defined(__sparc__)
1057     start = 0x40000000ul;
1058 # elif defined(__s390x__)
1059     start = 0x90000000ul;
1060 # elif defined(__mips__)
1061 #  if _MIPS_SIM == _ABI64
1062     start = 0x128000000ul;
1063 #  else
1064     start = 0x08000000ul;
1065 #  endif
1066 # endif
1067 
1068     buf = mmap((void *)start, size, prot, flags, -1, 0);
1069     if (buf == MAP_FAILED) {
1070         return NULL;
1071     }
1072 
1073 #ifdef __mips__
1074     if (cross_256mb(buf, size)) {
1075         /* Try again, with the original still mapped, to avoid re-acquiring
1076            that 256mb crossing.  This time don't specify an address.  */
1077         size_t size2;
1078         void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
1079         switch ((int)(buf2 != MAP_FAILED)) {
1080         case 1:
1081             if (!cross_256mb(buf2, size)) {
1082                 /* Success!  Use the new buffer.  */
1083                 munmap(buf, size);
1084                 break;
1085             }
1086             /* Failure.  Work with what we had.  */
1087             munmap(buf2, size);
1088             /* fallthru */
1089         default:
1090             /* Split the original buffer.  Free the smaller half.  */
1091             buf2 = split_cross_256mb(buf, size);
1092             size2 = tcg_ctx->code_gen_buffer_size;
1093             if (buf == buf2) {
1094                 munmap(buf + size2, size - size2);
1095             } else {
1096                 munmap(buf, size - size2);
1097             }
1098             size = size2;
1099             break;
1100         }
1101         buf = buf2;
1102     }
1103 #endif
1104 
1105     /* Request large pages for the buffer.  */
1106     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1107 
1108     return buf;
1109 }
1110 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1111 
1112 static inline void code_gen_alloc(size_t tb_size)
1113 {
1114     tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
1115     tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
1116     if (tcg_ctx->code_gen_buffer == NULL) {
1117         fprintf(stderr, "Could not allocate dynamic translator buffer\n");
1118         exit(1);
1119     }
1120 }
1121 
1122 static bool tb_cmp(const void *ap, const void *bp)
1123 {
1124     const TranslationBlock *a = ap;
1125     const TranslationBlock *b = bp;
1126 
1127     return a->pc == b->pc &&
1128         a->cs_base == b->cs_base &&
1129         a->flags == b->flags &&
1130         (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
1131         a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1132         a->page_addr[0] == b->page_addr[0] &&
1133         a->page_addr[1] == b->page_addr[1];
1134 }
1135 
1136 static void tb_htable_init(void)
1137 {
1138     unsigned int mode = QHT_MODE_AUTO_RESIZE;
1139 
1140     qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
1141 }
1142 
1143 /* Must be called before using the QEMU cpus. 'tb_size' is the size
1144    (in bytes) allocated to the translation buffer. Zero means default
1145    size. */
1146 void tcg_exec_init(unsigned long tb_size)
1147 {
1148     tcg_allowed = true;
1149     cpu_gen_init();
1150     page_init();
1151     tb_htable_init();
1152     code_gen_alloc(tb_size);
1153 #if defined(CONFIG_SOFTMMU)
1154     /* There's no guest base to take into account, so go ahead and
1155        initialize the prologue now.  */
1156     tcg_prologue_init(tcg_ctx);
1157 #endif
1158 }
1159 
1160 /*
1161  * Allocate a new translation block. Flush the translation buffer if
1162  * too many translation blocks or too much generated code.
1163  */
1164 static TranslationBlock *tb_alloc(target_ulong pc)
1165 {
1166     TranslationBlock *tb;
1167 
1168     assert_memory_lock();
1169 
1170     tb = tcg_tb_alloc(tcg_ctx);
1171     if (unlikely(tb == NULL)) {
1172         return NULL;
1173     }
1174     return tb;
1175 }
1176 
1177 /* call with @p->lock held */
1178 static inline void invalidate_page_bitmap(PageDesc *p)
1179 {
1180     assert_page_locked(p);
1181 #ifdef CONFIG_SOFTMMU
1182     g_free(p->code_bitmap);
1183     p->code_bitmap = NULL;
1184     p->code_write_count = 0;
1185 #endif
1186 }
1187 
1188 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
1189 static void page_flush_tb_1(int level, void **lp)
1190 {
1191     int i;
1192 
1193     if (*lp == NULL) {
1194         return;
1195     }
1196     if (level == 0) {
1197         PageDesc *pd = *lp;
1198 
1199         for (i = 0; i < V_L2_SIZE; ++i) {
1200             page_lock(&pd[i]);
1201             pd[i].first_tb = (uintptr_t)NULL;
1202             invalidate_page_bitmap(pd + i);
1203             page_unlock(&pd[i]);
1204         }
1205     } else {
1206         void **pp = *lp;
1207 
1208         for (i = 0; i < V_L2_SIZE; ++i) {
1209             page_flush_tb_1(level - 1, pp + i);
1210         }
1211     }
1212 }
1213 
1214 static void page_flush_tb(void)
1215 {
1216     int i, l1_sz = v_l1_size;
1217 
1218     for (i = 0; i < l1_sz; i++) {
1219         page_flush_tb_1(v_l2_levels, l1_map + i);
1220     }
1221 }
1222 
1223 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
1224 {
1225     const TranslationBlock *tb = value;
1226     size_t *size = data;
1227 
1228     *size += tb->tc.size;
1229     return false;
1230 }
1231 
1232 /* flush all the translation blocks */
1233 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
1234 {
1235     mmap_lock();
1236     /* If it is already been done on request of another CPU,
1237      * just retry.
1238      */
1239     if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
1240         goto done;
1241     }
1242 
1243     if (DEBUG_TB_FLUSH_GATE) {
1244         size_t nb_tbs = tcg_nb_tbs();
1245         size_t host_size = 0;
1246 
1247         tcg_tb_foreach(tb_host_size_iter, &host_size);
1248         printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1249                tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
1250     }
1251 
1252     CPU_FOREACH(cpu) {
1253         cpu_tb_jmp_cache_clear(cpu);
1254     }
1255 
1256     qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1257     page_flush_tb();
1258 
1259     tcg_region_reset_all();
1260     /* XXX: flush processor icache at this point if cache flush is
1261        expensive */
1262     atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1263 
1264 done:
1265     mmap_unlock();
1266 }
1267 
1268 void tb_flush(CPUState *cpu)
1269 {
1270     if (tcg_enabled()) {
1271         unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
1272         async_safe_run_on_cpu(cpu, do_tb_flush,
1273                               RUN_ON_CPU_HOST_INT(tb_flush_count));
1274     }
1275 }
1276 
1277 /*
1278  * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1279  * so in order to prevent bit rot we compile them unconditionally in user-mode,
1280  * and let the optimizer get rid of them by wrapping their user-only callers
1281  * with if (DEBUG_TB_CHECK_GATE).
1282  */
1283 #ifdef CONFIG_USER_ONLY
1284 
1285 static void
1286 do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
1287 {
1288     TranslationBlock *tb = p;
1289     target_ulong addr = *(target_ulong *)userp;
1290 
1291     if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1292         printf("ERROR invalidate: address=" TARGET_FMT_lx
1293                " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1294     }
1295 }
1296 
1297 /* verify that all the pages have correct rights for code
1298  *
1299  * Called with mmap_lock held.
1300  */
1301 static void tb_invalidate_check(target_ulong address)
1302 {
1303     address &= TARGET_PAGE_MASK;
1304     qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1305 }
1306 
1307 static void
1308 do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
1309 {
1310     TranslationBlock *tb = p;
1311     int flags1, flags2;
1312 
1313     flags1 = page_get_flags(tb->pc);
1314     flags2 = page_get_flags(tb->pc + tb->size - 1);
1315     if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1316         printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1317                (long)tb->pc, tb->size, flags1, flags2);
1318     }
1319 }
1320 
1321 /* verify that all the pages have correct rights for code */
1322 static void tb_page_check(void)
1323 {
1324     qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1325 }
1326 
1327 #endif /* CONFIG_USER_ONLY */
1328 
1329 /*
1330  * user-mode: call with mmap_lock held
1331  * !user-mode: call with @pd->lock held
1332  */
1333 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1334 {
1335     TranslationBlock *tb1;
1336     uintptr_t *pprev;
1337     unsigned int n1;
1338 
1339     assert_page_locked(pd);
1340     pprev = &pd->first_tb;
1341     PAGE_FOR_EACH_TB(pd, tb1, n1) {
1342         if (tb1 == tb) {
1343             *pprev = tb1->page_next[n1];
1344             return;
1345         }
1346         pprev = &tb1->page_next[n1];
1347     }
1348     g_assert_not_reached();
1349 }
1350 
1351 /* remove @orig from its @n_orig-th jump list */
1352 static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
1353 {
1354     uintptr_t ptr, ptr_locked;
1355     TranslationBlock *dest;
1356     TranslationBlock *tb;
1357     uintptr_t *pprev;
1358     int n;
1359 
1360     /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1361     ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1362     dest = (TranslationBlock *)(ptr & ~1);
1363     if (dest == NULL) {
1364         return;
1365     }
1366 
1367     qemu_spin_lock(&dest->jmp_lock);
1368     /*
1369      * While acquiring the lock, the jump might have been removed if the
1370      * destination TB was invalidated; check again.
1371      */
1372     ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
1373     if (ptr_locked != ptr) {
1374         qemu_spin_unlock(&dest->jmp_lock);
1375         /*
1376          * The only possibility is that the jump was unlinked via
1377          * tb_jump_unlink(dest). Seeing here another destination would be a bug,
1378          * because we set the LSB above.
1379          */
1380         g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1381         return;
1382     }
1383     /*
1384      * We first acquired the lock, and since the destination pointer matches,
1385      * we know for sure that @orig is in the jmp list.
1386      */
1387     pprev = &dest->jmp_list_head;
1388     TB_FOR_EACH_JMP(dest, tb, n) {
1389         if (tb == orig && n == n_orig) {
1390             *pprev = tb->jmp_list_next[n];
1391             /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1392             qemu_spin_unlock(&dest->jmp_lock);
1393             return;
1394         }
1395         pprev = &tb->jmp_list_next[n];
1396     }
1397     g_assert_not_reached();
1398 }
1399 
1400 /* reset the jump entry 'n' of a TB so that it is not chained to
1401    another TB */
1402 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1403 {
1404     uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1405     tb_set_jmp_target(tb, n, addr);
1406 }
1407 
1408 /* remove any jumps to the TB */
1409 static inline void tb_jmp_unlink(TranslationBlock *dest)
1410 {
1411     TranslationBlock *tb;
1412     int n;
1413 
1414     qemu_spin_lock(&dest->jmp_lock);
1415 
1416     TB_FOR_EACH_JMP(dest, tb, n) {
1417         tb_reset_jump(tb, n);
1418         atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1419         /* No need to clear the list entry; setting the dest ptr is enough */
1420     }
1421     dest->jmp_list_head = (uintptr_t)NULL;
1422 
1423     qemu_spin_unlock(&dest->jmp_lock);
1424 }
1425 
1426 /*
1427  * In user-mode, call with mmap_lock held.
1428  * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1429  * locks held.
1430  */
1431 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1432 {
1433     CPUState *cpu;
1434     PageDesc *p;
1435     uint32_t h;
1436     tb_page_addr_t phys_pc;
1437 
1438     assert_memory_lock();
1439 
1440     /* make sure no further incoming jumps will be chained to this TB */
1441     qemu_spin_lock(&tb->jmp_lock);
1442     atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1443     qemu_spin_unlock(&tb->jmp_lock);
1444 
1445     /* remove the TB from the hash list */
1446     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1447     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
1448                      tb->trace_vcpu_dstate);
1449     if (!qht_remove(&tb_ctx.htable, tb, h)) {
1450         return;
1451     }
1452 
1453     /* remove the TB from the page list */
1454     if (rm_from_page_list) {
1455         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1456         tb_page_remove(p, tb);
1457         invalidate_page_bitmap(p);
1458         if (tb->page_addr[1] != -1) {
1459             p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1460             tb_page_remove(p, tb);
1461             invalidate_page_bitmap(p);
1462         }
1463     }
1464 
1465     /* remove the TB from the hash list */
1466     h = tb_jmp_cache_hash_func(tb->pc);
1467     CPU_FOREACH(cpu) {
1468         if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1469             atomic_set(&cpu->tb_jmp_cache[h], NULL);
1470         }
1471     }
1472 
1473     /* suppress this TB from the two jump lists */
1474     tb_remove_from_jmp_list(tb, 0);
1475     tb_remove_from_jmp_list(tb, 1);
1476 
1477     /* suppress any remaining jumps to this TB */
1478     tb_jmp_unlink(tb);
1479 
1480     atomic_set(&tcg_ctx->tb_phys_invalidate_count,
1481                tcg_ctx->tb_phys_invalidate_count + 1);
1482 }
1483 
1484 static void tb_phys_invalidate__locked(TranslationBlock *tb)
1485 {
1486     do_tb_phys_invalidate(tb, true);
1487 }
1488 
1489 /* invalidate one TB
1490  *
1491  * Called with mmap_lock held in user-mode.
1492  */
1493 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1494 {
1495     if (page_addr == -1) {
1496         page_lock_tb(tb);
1497         do_tb_phys_invalidate(tb, true);
1498         page_unlock_tb(tb);
1499     } else {
1500         do_tb_phys_invalidate(tb, false);
1501     }
1502 }
1503 
1504 #ifdef CONFIG_SOFTMMU
1505 /* call with @p->lock held */
1506 static void build_page_bitmap(PageDesc *p)
1507 {
1508     int n, tb_start, tb_end;
1509     TranslationBlock *tb;
1510 
1511     assert_page_locked(p);
1512     p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1513 
1514     PAGE_FOR_EACH_TB(p, tb, n) {
1515         /* NOTE: this is subtle as a TB may span two physical pages */
1516         if (n == 0) {
1517             /* NOTE: tb_end may be after the end of the page, but
1518                it is not a problem */
1519             tb_start = tb->pc & ~TARGET_PAGE_MASK;
1520             tb_end = tb_start + tb->size;
1521             if (tb_end > TARGET_PAGE_SIZE) {
1522                 tb_end = TARGET_PAGE_SIZE;
1523              }
1524         } else {
1525             tb_start = 0;
1526             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1527         }
1528         bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1529     }
1530 }
1531 #endif
1532 
1533 /* add the tb in the target page and protect it if necessary
1534  *
1535  * Called with mmap_lock held for user-mode emulation.
1536  * Called with @p->lock held in !user-mode.
1537  */
1538 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1539                                unsigned int n, tb_page_addr_t page_addr)
1540 {
1541 #ifndef CONFIG_USER_ONLY
1542     bool page_already_protected;
1543 #endif
1544 
1545     assert_page_locked(p);
1546 
1547     tb->page_addr[n] = page_addr;
1548     tb->page_next[n] = p->first_tb;
1549 #ifndef CONFIG_USER_ONLY
1550     page_already_protected = p->first_tb != (uintptr_t)NULL;
1551 #endif
1552     p->first_tb = (uintptr_t)tb | n;
1553     invalidate_page_bitmap(p);
1554 
1555 #if defined(CONFIG_USER_ONLY)
1556     if (p->flags & PAGE_WRITE) {
1557         target_ulong addr;
1558         PageDesc *p2;
1559         int prot;
1560 
1561         /* force the host page as non writable (writes will have a
1562            page fault + mprotect overhead) */
1563         page_addr &= qemu_host_page_mask;
1564         prot = 0;
1565         for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1566             addr += TARGET_PAGE_SIZE) {
1567 
1568             p2 = page_find(addr >> TARGET_PAGE_BITS);
1569             if (!p2) {
1570                 continue;
1571             }
1572             prot |= p2->flags;
1573             p2->flags &= ~PAGE_WRITE;
1574           }
1575         mprotect(g2h(page_addr), qemu_host_page_size,
1576                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1577         if (DEBUG_TB_INVALIDATE_GATE) {
1578             printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1579         }
1580     }
1581 #else
1582     /* if some code is already present, then the pages are already
1583        protected. So we handle the case where only the first TB is
1584        allocated in a physical page */
1585     if (!page_already_protected) {
1586         tlb_protect_code(page_addr);
1587     }
1588 #endif
1589 }
1590 
1591 /* add a new TB and link it to the physical page tables. phys_page2 is
1592  * (-1) to indicate that only one page contains the TB.
1593  *
1594  * Called with mmap_lock held for user-mode emulation.
1595  *
1596  * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1597  * Note that in !user-mode, another thread might have already added a TB
1598  * for the same block of guest code that @tb corresponds to. In that case,
1599  * the caller should discard the original @tb, and use instead the returned TB.
1600  */
1601 static TranslationBlock *
1602 tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1603              tb_page_addr_t phys_page2)
1604 {
1605     PageDesc *p;
1606     PageDesc *p2 = NULL;
1607     void *existing_tb = NULL;
1608     uint32_t h;
1609 
1610     assert_memory_lock();
1611 
1612     /*
1613      * Add the TB to the page list, acquiring first the pages's locks.
1614      * We keep the locks held until after inserting the TB in the hash table,
1615      * so that if the insertion fails we know for sure that the TBs are still
1616      * in the page descriptors.
1617      * Note that inserting into the hash table first isn't an option, since
1618      * we can only insert TBs that are fully initialized.
1619      */
1620     page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1621     tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1622     if (p2) {
1623         tb_page_add(p2, tb, 1, phys_page2);
1624     } else {
1625         tb->page_addr[1] = -1;
1626     }
1627 
1628     /* add in the hash table */
1629     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1630                      tb->trace_vcpu_dstate);
1631     qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1632 
1633     /* remove TB from the page(s) if we couldn't insert it */
1634     if (unlikely(existing_tb)) {
1635         tb_page_remove(p, tb);
1636         invalidate_page_bitmap(p);
1637         if (p2) {
1638             tb_page_remove(p2, tb);
1639             invalidate_page_bitmap(p2);
1640         }
1641         tb = existing_tb;
1642     }
1643 
1644     if (p2 && p2 != p) {
1645         page_unlock(p2);
1646     }
1647     page_unlock(p);
1648 
1649 #ifdef CONFIG_USER_ONLY
1650     if (DEBUG_TB_CHECK_GATE) {
1651         tb_page_check();
1652     }
1653 #endif
1654     return tb;
1655 }
1656 
1657 /* Called with mmap_lock held for user mode emulation.  */
1658 TranslationBlock *tb_gen_code(CPUState *cpu,
1659                               target_ulong pc, target_ulong cs_base,
1660                               uint32_t flags, int cflags)
1661 {
1662     CPUArchState *env = cpu->env_ptr;
1663     TranslationBlock *tb, *existing_tb;
1664     tb_page_addr_t phys_pc, phys_page2;
1665     target_ulong virt_page2;
1666     tcg_insn_unit *gen_code_buf;
1667     int gen_code_size, search_size;
1668 #ifdef CONFIG_PROFILER
1669     TCGProfile *prof = &tcg_ctx->prof;
1670     int64_t ti;
1671 #endif
1672     assert_memory_lock();
1673 
1674     phys_pc = get_page_addr_code(env, pc);
1675 
1676  buffer_overflow:
1677     tb = tb_alloc(pc);
1678     if (unlikely(!tb)) {
1679         /* flush must be done */
1680         tb_flush(cpu);
1681         mmap_unlock();
1682         /* Make the execution loop process the flush as soon as possible.  */
1683         cpu->exception_index = EXCP_INTERRUPT;
1684         cpu_loop_exit(cpu);
1685     }
1686 
1687     gen_code_buf = tcg_ctx->code_gen_ptr;
1688     tb->tc.ptr = gen_code_buf;
1689     tb->pc = pc;
1690     tb->cs_base = cs_base;
1691     tb->flags = flags;
1692     tb->cflags = cflags;
1693     tb->trace_vcpu_dstate = *cpu->trace_dstate;
1694     tcg_ctx->tb_cflags = cflags;
1695 
1696 #ifdef CONFIG_PROFILER
1697     /* includes aborted translations because of exceptions */
1698     atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
1699     ti = profile_getclock();
1700 #endif
1701 
1702     tcg_func_start(tcg_ctx);
1703 
1704     tcg_ctx->cpu = ENV_GET_CPU(env);
1705     gen_intermediate_code(cpu, tb);
1706     tcg_ctx->cpu = NULL;
1707 
1708     trace_translate_block(tb, tb->pc, tb->tc.ptr);
1709 
1710     /* generate machine code */
1711     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
1712     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
1713     tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
1714     if (TCG_TARGET_HAS_direct_jump) {
1715         tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
1716         tcg_ctx->tb_jmp_target_addr = NULL;
1717     } else {
1718         tcg_ctx->tb_jmp_insn_offset = NULL;
1719         tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
1720     }
1721 
1722 #ifdef CONFIG_PROFILER
1723     atomic_set(&prof->tb_count, prof->tb_count + 1);
1724     atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
1725     ti = profile_getclock();
1726 #endif
1727 
1728     /* ??? Overflow could be handled better here.  In particular, we
1729        don't need to re-do gen_intermediate_code, nor should we re-do
1730        the tcg optimization currently hidden inside tcg_gen_code.  All
1731        that should be required is to flush the TBs, allocate a new TB,
1732        re-initialize it per above, and re-do the actual code generation.  */
1733     gen_code_size = tcg_gen_code(tcg_ctx, tb);
1734     if (unlikely(gen_code_size < 0)) {
1735         goto buffer_overflow;
1736     }
1737     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
1738     if (unlikely(search_size < 0)) {
1739         goto buffer_overflow;
1740     }
1741     tb->tc.size = gen_code_size;
1742 
1743 #ifdef CONFIG_PROFILER
1744     atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
1745     atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
1746     atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
1747     atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
1748 #endif
1749 
1750 #ifdef DEBUG_DISAS
1751     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
1752         qemu_log_in_addr_range(tb->pc)) {
1753         qemu_log_lock();
1754         qemu_log("OUT: [size=%d]\n", gen_code_size);
1755         if (tcg_ctx->data_gen_ptr) {
1756             size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
1757             size_t data_size = gen_code_size - code_size;
1758             size_t i;
1759 
1760             log_disas(tb->tc.ptr, code_size);
1761 
1762             for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
1763                 if (sizeof(tcg_target_ulong) == 8) {
1764                     qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
1765                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1766                              *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
1767                 } else {
1768                     qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
1769                              (uintptr_t)tcg_ctx->data_gen_ptr + i,
1770                              *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
1771                 }
1772             }
1773         } else {
1774             log_disas(tb->tc.ptr, gen_code_size);
1775         }
1776         qemu_log("\n");
1777         qemu_log_flush();
1778         qemu_log_unlock();
1779     }
1780 #endif
1781 
1782     atomic_set(&tcg_ctx->code_gen_ptr, (void *)
1783         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
1784                  CODE_GEN_ALIGN));
1785 
1786     /* init jump list */
1787     qemu_spin_init(&tb->jmp_lock);
1788     tb->jmp_list_head = (uintptr_t)NULL;
1789     tb->jmp_list_next[0] = (uintptr_t)NULL;
1790     tb->jmp_list_next[1] = (uintptr_t)NULL;
1791     tb->jmp_dest[0] = (uintptr_t)NULL;
1792     tb->jmp_dest[1] = (uintptr_t)NULL;
1793 
1794     /* init original jump addresses wich has been set during tcg_gen_code() */
1795     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1796         tb_reset_jump(tb, 0);
1797     }
1798     if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1799         tb_reset_jump(tb, 1);
1800     }
1801 
1802     /* check next page if needed */
1803     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1804     phys_page2 = -1;
1805     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1806         phys_page2 = get_page_addr_code(env, virt_page2);
1807     }
1808     /*
1809      * No explicit memory barrier is required -- tb_link_page() makes the
1810      * TB visible in a consistent state.
1811      */
1812     existing_tb = tb_link_page(tb, phys_pc, phys_page2);
1813     /* if the TB already exists, discard what we just translated */
1814     if (unlikely(existing_tb != tb)) {
1815         uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
1816 
1817         orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
1818         atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
1819         return existing_tb;
1820     }
1821     tcg_tb_insert(tb);
1822     return tb;
1823 }
1824 
1825 /*
1826  * @p must be non-NULL.
1827  * user-mode: call with mmap_lock held.
1828  * !user-mode: call with all @pages locked.
1829  */
1830 static void
1831 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1832                                       PageDesc *p, tb_page_addr_t start,
1833                                       tb_page_addr_t end,
1834                                       int is_cpu_write_access)
1835 {
1836     TranslationBlock *tb;
1837     tb_page_addr_t tb_start, tb_end;
1838     int n;
1839 #ifdef TARGET_HAS_PRECISE_SMC
1840     CPUState *cpu = current_cpu;
1841     CPUArchState *env = NULL;
1842     int current_tb_not_found = is_cpu_write_access;
1843     TranslationBlock *current_tb = NULL;
1844     int current_tb_modified = 0;
1845     target_ulong current_pc = 0;
1846     target_ulong current_cs_base = 0;
1847     uint32_t current_flags = 0;
1848 #endif /* TARGET_HAS_PRECISE_SMC */
1849 
1850     assert_page_locked(p);
1851 
1852 #if defined(TARGET_HAS_PRECISE_SMC)
1853     if (cpu != NULL) {
1854         env = cpu->env_ptr;
1855     }
1856 #endif
1857 
1858     /* we remove all the TBs in the range [start, end[ */
1859     /* XXX: see if in some cases it could be faster to invalidate all
1860        the code */
1861     PAGE_FOR_EACH_TB(p, tb, n) {
1862         assert_page_locked(p);
1863         /* NOTE: this is subtle as a TB may span two physical pages */
1864         if (n == 0) {
1865             /* NOTE: tb_end may be after the end of the page, but
1866                it is not a problem */
1867             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1868             tb_end = tb_start + tb->size;
1869         } else {
1870             tb_start = tb->page_addr[1];
1871             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1872         }
1873         if (!(tb_end <= start || tb_start >= end)) {
1874 #ifdef TARGET_HAS_PRECISE_SMC
1875             if (current_tb_not_found) {
1876                 current_tb_not_found = 0;
1877                 current_tb = NULL;
1878                 if (cpu->mem_io_pc) {
1879                     /* now we have a real cpu fault */
1880                     current_tb = tcg_tb_lookup(cpu->mem_io_pc);
1881                 }
1882             }
1883             if (current_tb == tb &&
1884                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1885                 /* If we are modifying the current TB, we must stop
1886                 its execution. We could be more precise by checking
1887                 that the modification is after the current PC, but it
1888                 would require a specialized function to partially
1889                 restore the CPU state */
1890 
1891                 current_tb_modified = 1;
1892                 cpu_restore_state_from_tb(cpu, current_tb,
1893                                           cpu->mem_io_pc, true);
1894                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1895                                      &current_flags);
1896             }
1897 #endif /* TARGET_HAS_PRECISE_SMC */
1898             tb_phys_invalidate__locked(tb);
1899         }
1900     }
1901 #if !defined(CONFIG_USER_ONLY)
1902     /* if no code remaining, no need to continue to use slow writes */
1903     if (!p->first_tb) {
1904         invalidate_page_bitmap(p);
1905         tlb_unprotect_code(start);
1906     }
1907 #endif
1908 #ifdef TARGET_HAS_PRECISE_SMC
1909     if (current_tb_modified) {
1910         page_collection_unlock(pages);
1911         /* Force execution of one insn next time.  */
1912         cpu->cflags_next_tb = 1 | curr_cflags();
1913         mmap_unlock();
1914         cpu_loop_exit_noexc(cpu);
1915     }
1916 #endif
1917 }
1918 
1919 /*
1920  * Invalidate all TBs which intersect with the target physical address range
1921  * [start;end[. NOTE: start and end must refer to the *same* physical page.
1922  * 'is_cpu_write_access' should be true if called from a real cpu write
1923  * access: the virtual CPU will exit the current TB if code is modified inside
1924  * this TB.
1925  *
1926  * Called with mmap_lock held for user-mode emulation
1927  */
1928 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1929                                    int is_cpu_write_access)
1930 {
1931     struct page_collection *pages;
1932     PageDesc *p;
1933 
1934     assert_memory_lock();
1935 
1936     p = page_find(start >> TARGET_PAGE_BITS);
1937     if (p == NULL) {
1938         return;
1939     }
1940     pages = page_collection_lock(start, end);
1941     tb_invalidate_phys_page_range__locked(pages, p, start, end,
1942                                           is_cpu_write_access);
1943     page_collection_unlock(pages);
1944 }
1945 
1946 /*
1947  * Invalidate all TBs which intersect with the target physical address range
1948  * [start;end[. NOTE: start and end may refer to *different* physical pages.
1949  * 'is_cpu_write_access' should be true if called from a real cpu write
1950  * access: the virtual CPU will exit the current TB if code is modified inside
1951  * this TB.
1952  *
1953  * Called with mmap_lock held for user-mode emulation.
1954  */
1955 #ifdef CONFIG_SOFTMMU
1956 void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
1957 #else
1958 void tb_invalidate_phys_range(target_ulong start, target_ulong end)
1959 #endif
1960 {
1961     struct page_collection *pages;
1962     tb_page_addr_t next;
1963 
1964     assert_memory_lock();
1965 
1966     pages = page_collection_lock(start, end);
1967     for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1968          start < end;
1969          start = next, next += TARGET_PAGE_SIZE) {
1970         PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
1971         tb_page_addr_t bound = MIN(next, end);
1972 
1973         if (pd == NULL) {
1974             continue;
1975         }
1976         tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
1977     }
1978     page_collection_unlock(pages);
1979 }
1980 
1981 #ifdef CONFIG_SOFTMMU
1982 /* len must be <= 8 and start must be a multiple of len.
1983  * Called via softmmu_template.h when code areas are written to with
1984  * iothread mutex not held.
1985  *
1986  * Call with all @pages in the range [@start, @start + len[ locked.
1987  */
1988 void tb_invalidate_phys_page_fast(struct page_collection *pages,
1989                                   tb_page_addr_t start, int len)
1990 {
1991     PageDesc *p;
1992 
1993 #if 0
1994     if (1) {
1995         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1996                   cpu_single_env->mem_io_vaddr, len,
1997                   cpu_single_env->eip,
1998                   cpu_single_env->eip +
1999                   (intptr_t)cpu_single_env->segs[R_CS].base);
2000     }
2001 #endif
2002     assert_memory_lock();
2003 
2004     p = page_find(start >> TARGET_PAGE_BITS);
2005     if (!p) {
2006         return;
2007     }
2008 
2009     assert_page_locked(p);
2010     if (!p->code_bitmap &&
2011         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
2012         build_page_bitmap(p);
2013     }
2014     if (p->code_bitmap) {
2015         unsigned int nr;
2016         unsigned long b;
2017 
2018         nr = start & ~TARGET_PAGE_MASK;
2019         b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
2020         if (b & ((1 << len) - 1)) {
2021             goto do_invalidate;
2022         }
2023     } else {
2024     do_invalidate:
2025         tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 1);
2026     }
2027 }
2028 #else
2029 /* Called with mmap_lock held. If pc is not 0 then it indicates the
2030  * host PC of the faulting store instruction that caused this invalidate.
2031  * Returns true if the caller needs to abort execution of the current
2032  * TB (because it was modified by this store and the guest CPU has
2033  * precise-SMC semantics).
2034  */
2035 static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
2036 {
2037     TranslationBlock *tb;
2038     PageDesc *p;
2039     int n;
2040 #ifdef TARGET_HAS_PRECISE_SMC
2041     TranslationBlock *current_tb = NULL;
2042     CPUState *cpu = current_cpu;
2043     CPUArchState *env = NULL;
2044     int current_tb_modified = 0;
2045     target_ulong current_pc = 0;
2046     target_ulong current_cs_base = 0;
2047     uint32_t current_flags = 0;
2048 #endif
2049 
2050     assert_memory_lock();
2051 
2052     addr &= TARGET_PAGE_MASK;
2053     p = page_find(addr >> TARGET_PAGE_BITS);
2054     if (!p) {
2055         return false;
2056     }
2057 
2058 #ifdef TARGET_HAS_PRECISE_SMC
2059     if (p->first_tb && pc != 0) {
2060         current_tb = tcg_tb_lookup(pc);
2061     }
2062     if (cpu != NULL) {
2063         env = cpu->env_ptr;
2064     }
2065 #endif
2066     assert_page_locked(p);
2067     PAGE_FOR_EACH_TB(p, tb, n) {
2068 #ifdef TARGET_HAS_PRECISE_SMC
2069         if (current_tb == tb &&
2070             (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
2071                 /* If we are modifying the current TB, we must stop
2072                    its execution. We could be more precise by checking
2073                    that the modification is after the current PC, but it
2074                    would require a specialized function to partially
2075                    restore the CPU state */
2076 
2077             current_tb_modified = 1;
2078             cpu_restore_state_from_tb(cpu, current_tb, pc, true);
2079             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
2080                                  &current_flags);
2081         }
2082 #endif /* TARGET_HAS_PRECISE_SMC */
2083         tb_phys_invalidate(tb, addr);
2084     }
2085     p->first_tb = (uintptr_t)NULL;
2086 #ifdef TARGET_HAS_PRECISE_SMC
2087     if (current_tb_modified) {
2088         /* Force execution of one insn next time.  */
2089         cpu->cflags_next_tb = 1 | curr_cflags();
2090         return true;
2091     }
2092 #endif
2093 
2094     return false;
2095 }
2096 #endif
2097 
2098 /* user-mode: call with mmap_lock held */
2099 void tb_check_watchpoint(CPUState *cpu)
2100 {
2101     TranslationBlock *tb;
2102 
2103     assert_memory_lock();
2104 
2105     tb = tcg_tb_lookup(cpu->mem_io_pc);
2106     if (tb) {
2107         /* We can use retranslation to find the PC.  */
2108         cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc, true);
2109         tb_phys_invalidate(tb, -1);
2110     } else {
2111         /* The exception probably happened in a helper.  The CPU state should
2112            have been saved before calling it. Fetch the PC from there.  */
2113         CPUArchState *env = cpu->env_ptr;
2114         target_ulong pc, cs_base;
2115         tb_page_addr_t addr;
2116         uint32_t flags;
2117 
2118         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
2119         addr = get_page_addr_code(env, pc);
2120         tb_invalidate_phys_range(addr, addr + 1);
2121     }
2122 }
2123 
2124 #ifndef CONFIG_USER_ONLY
2125 /* in deterministic execution mode, instructions doing device I/Os
2126  * must be at the end of the TB.
2127  *
2128  * Called by softmmu_template.h, with iothread mutex not held.
2129  */
2130 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
2131 {
2132 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
2133     CPUArchState *env = cpu->env_ptr;
2134 #endif
2135     TranslationBlock *tb;
2136     uint32_t n;
2137 
2138     tb = tcg_tb_lookup(retaddr);
2139     if (!tb) {
2140         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
2141                   (void *)retaddr);
2142     }
2143     cpu_restore_state_from_tb(cpu, tb, retaddr, true);
2144 
2145     /* On MIPS and SH, delay slot instructions can only be restarted if
2146        they were already the first instruction in the TB.  If this is not
2147        the first instruction in a TB then re-execute the preceding
2148        branch.  */
2149     n = 1;
2150 #if defined(TARGET_MIPS)
2151     if ((env->hflags & MIPS_HFLAG_BMASK) != 0
2152         && env->active_tc.PC != tb->pc) {
2153         env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
2154         cpu->icount_decr.u16.low++;
2155         env->hflags &= ~MIPS_HFLAG_BMASK;
2156         n = 2;
2157     }
2158 #elif defined(TARGET_SH4)
2159     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
2160         && env->pc != tb->pc) {
2161         env->pc -= 2;
2162         cpu->icount_decr.u16.low++;
2163         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
2164         n = 2;
2165     }
2166 #endif
2167 
2168     /* Generate a new TB executing the I/O insn.  */
2169     cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;
2170 
2171     if (tb_cflags(tb) & CF_NOCACHE) {
2172         if (tb->orig_tb) {
2173             /* Invalidate original TB if this TB was generated in
2174              * cpu_exec_nocache() */
2175             tb_phys_invalidate(tb->orig_tb, -1);
2176         }
2177         tcg_tb_remove(tb);
2178     }
2179 
2180     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2181      * the first in the TB) then we end up generating a whole new TB and
2182      *  repeating the fault, which is horribly inefficient.
2183      *  Better would be to execute just this insn uncached, or generate a
2184      *  second new TB.
2185      */
2186     cpu_loop_exit_noexc(cpu);
2187 }
2188 
2189 static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
2190 {
2191     unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
2192 
2193     for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
2194         atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
2195     }
2196 }
2197 
2198 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
2199 {
2200     /* Discard jump cache entries for any tb which might potentially
2201        overlap the flushed page.  */
2202     tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
2203     tb_jmp_cache_clear_page(cpu, addr);
2204 }
2205 
2206 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
2207                                  struct qht_stats hst)
2208 {
2209     uint32_t hgram_opts;
2210     size_t hgram_bins;
2211     char *hgram;
2212 
2213     if (!hst.head_buckets) {
2214         return;
2215     }
2216     cpu_fprintf(f, "TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
2217                 hst.used_head_buckets, hst.head_buckets,
2218                 (double)hst.used_head_buckets / hst.head_buckets * 100);
2219 
2220     hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
2221     hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
2222     if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
2223         hgram_opts |= QDIST_PR_NODECIMAL;
2224     }
2225     hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
2226     cpu_fprintf(f, "TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
2227                 qdist_avg(&hst.occupancy) * 100, hgram);
2228     g_free(hgram);
2229 
2230     hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
2231     hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
2232     if (hgram_bins > 10) {
2233         hgram_bins = 10;
2234     } else {
2235         hgram_bins = 0;
2236         hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
2237     }
2238     hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
2239     cpu_fprintf(f, "TB hash avg chain   %0.3f buckets. Histogram: %s\n",
2240                 qdist_avg(&hst.chain), hgram);
2241     g_free(hgram);
2242 }
2243 
2244 struct tb_tree_stats {
2245     size_t nb_tbs;
2246     size_t host_size;
2247     size_t target_size;
2248     size_t max_target_size;
2249     size_t direct_jmp_count;
2250     size_t direct_jmp2_count;
2251     size_t cross_page;
2252 };
2253 
2254 static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
2255 {
2256     const TranslationBlock *tb = value;
2257     struct tb_tree_stats *tst = data;
2258 
2259     tst->nb_tbs++;
2260     tst->host_size += tb->tc.size;
2261     tst->target_size += tb->size;
2262     if (tb->size > tst->max_target_size) {
2263         tst->max_target_size = tb->size;
2264     }
2265     if (tb->page_addr[1] != -1) {
2266         tst->cross_page++;
2267     }
2268     if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
2269         tst->direct_jmp_count++;
2270         if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
2271             tst->direct_jmp2_count++;
2272         }
2273     }
2274     return false;
2275 }
2276 
2277 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
2278 {
2279     struct tb_tree_stats tst = {};
2280     struct qht_stats hst;
2281     size_t nb_tbs;
2282 
2283     tcg_tb_foreach(tb_tree_stats_iter, &tst);
2284     nb_tbs = tst.nb_tbs;
2285     /* XXX: avoid using doubles ? */
2286     cpu_fprintf(f, "Translation buffer state:\n");
2287     /*
2288      * Report total code size including the padding and TB structs;
2289      * otherwise users might think "-tb-size" is not honoured.
2290      * For avg host size we use the precise numbers from tb_tree_stats though.
2291      */
2292     cpu_fprintf(f, "gen code size       %zu/%zu\n",
2293                 tcg_code_size(), tcg_code_capacity());
2294     cpu_fprintf(f, "TB count            %zu\n", nb_tbs);
2295     cpu_fprintf(f, "TB avg target size  %zu max=%zu bytes\n",
2296                 nb_tbs ? tst.target_size / nb_tbs : 0,
2297                 tst.max_target_size);
2298     cpu_fprintf(f, "TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
2299                 nb_tbs ? tst.host_size / nb_tbs : 0,
2300                 tst.target_size ? (double)tst.host_size / tst.target_size : 0);
2301     cpu_fprintf(f, "cross page TB count %zu (%zu%%)\n", tst.cross_page,
2302             nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
2303     cpu_fprintf(f, "direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
2304                 tst.direct_jmp_count,
2305                 nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
2306                 tst.direct_jmp2_count,
2307                 nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
2308 
2309     qht_statistics_init(&tb_ctx.htable, &hst);
2310     print_qht_statistics(f, cpu_fprintf, hst);
2311     qht_statistics_destroy(&hst);
2312 
2313     cpu_fprintf(f, "\nStatistics:\n");
2314     cpu_fprintf(f, "TB flush count      %u\n",
2315                 atomic_read(&tb_ctx.tb_flush_count));
2316     cpu_fprintf(f, "TB invalidate count %zu\n", tcg_tb_phys_invalidate_count());
2317     cpu_fprintf(f, "TLB flush count     %zu\n", tlb_flush_count());
2318     tcg_dump_info(f, cpu_fprintf);
2319 }
2320 
2321 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
2322 {
2323     tcg_dump_op_count(f, cpu_fprintf);
2324 }
2325 
2326 #else /* CONFIG_USER_ONLY */
2327 
2328 void cpu_interrupt(CPUState *cpu, int mask)
2329 {
2330     g_assert(qemu_mutex_iothread_locked());
2331     cpu->interrupt_request |= mask;
2332     cpu->icount_decr.u16.high = -1;
2333 }
2334 
2335 /*
2336  * Walks guest process memory "regions" one by one
2337  * and calls callback function 'fn' for each region.
2338  */
2339 struct walk_memory_regions_data {
2340     walk_memory_regions_fn fn;
2341     void *priv;
2342     target_ulong start;
2343     int prot;
2344 };
2345 
2346 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2347                                    target_ulong end, int new_prot)
2348 {
2349     if (data->start != -1u) {
2350         int rc = data->fn(data->priv, data->start, end, data->prot);
2351         if (rc != 0) {
2352             return rc;
2353         }
2354     }
2355 
2356     data->start = (new_prot ? end : -1u);
2357     data->prot = new_prot;
2358 
2359     return 0;
2360 }
2361 
2362 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2363                                  target_ulong base, int level, void **lp)
2364 {
2365     target_ulong pa;
2366     int i, rc;
2367 
2368     if (*lp == NULL) {
2369         return walk_memory_regions_end(data, base, 0);
2370     }
2371 
2372     if (level == 0) {
2373         PageDesc *pd = *lp;
2374 
2375         for (i = 0; i < V_L2_SIZE; ++i) {
2376             int prot = pd[i].flags;
2377 
2378             pa = base | (i << TARGET_PAGE_BITS);
2379             if (prot != data->prot) {
2380                 rc = walk_memory_regions_end(data, pa, prot);
2381                 if (rc != 0) {
2382                     return rc;
2383                 }
2384             }
2385         }
2386     } else {
2387         void **pp = *lp;
2388 
2389         for (i = 0; i < V_L2_SIZE; ++i) {
2390             pa = base | ((target_ulong)i <<
2391                 (TARGET_PAGE_BITS + V_L2_BITS * level));
2392             rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2393             if (rc != 0) {
2394                 return rc;
2395             }
2396         }
2397     }
2398 
2399     return 0;
2400 }
2401 
2402 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2403 {
2404     struct walk_memory_regions_data data;
2405     uintptr_t i, l1_sz = v_l1_size;
2406 
2407     data.fn = fn;
2408     data.priv = priv;
2409     data.start = -1u;
2410     data.prot = 0;
2411 
2412     for (i = 0; i < l1_sz; i++) {
2413         target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2414         int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
2415         if (rc != 0) {
2416             return rc;
2417         }
2418     }
2419 
2420     return walk_memory_regions_end(&data, 0, 0);
2421 }
2422 
2423 static int dump_region(void *priv, target_ulong start,
2424     target_ulong end, unsigned long prot)
2425 {
2426     FILE *f = (FILE *)priv;
2427 
2428     (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2429         " "TARGET_FMT_lx" %c%c%c\n",
2430         start, end, end - start,
2431         ((prot & PAGE_READ) ? 'r' : '-'),
2432         ((prot & PAGE_WRITE) ? 'w' : '-'),
2433         ((prot & PAGE_EXEC) ? 'x' : '-'));
2434 
2435     return 0;
2436 }
2437 
2438 /* dump memory mappings */
2439 void page_dump(FILE *f)
2440 {
2441     const int length = sizeof(target_ulong) * 2;
2442     (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2443             length, "start", length, "end", length, "size", "prot");
2444     walk_memory_regions(f, dump_region);
2445 }
2446 
2447 int page_get_flags(target_ulong address)
2448 {
2449     PageDesc *p;
2450 
2451     p = page_find(address >> TARGET_PAGE_BITS);
2452     if (!p) {
2453         return 0;
2454     }
2455     return p->flags;
2456 }
2457 
2458 /* Modify the flags of a page and invalidate the code if necessary.
2459    The flag PAGE_WRITE_ORG is positioned automatically depending
2460    on PAGE_WRITE.  The mmap_lock should already be held.  */
2461 void page_set_flags(target_ulong start, target_ulong end, int flags)
2462 {
2463     target_ulong addr, len;
2464 
2465     /* This function should never be called with addresses outside the
2466        guest address space.  If this assert fires, it probably indicates
2467        a missing call to h2g_valid.  */
2468 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2469     assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2470 #endif
2471     assert(start < end);
2472     assert_memory_lock();
2473 
2474     start = start & TARGET_PAGE_MASK;
2475     end = TARGET_PAGE_ALIGN(end);
2476 
2477     if (flags & PAGE_WRITE) {
2478         flags |= PAGE_WRITE_ORG;
2479     }
2480 
2481     for (addr = start, len = end - start;
2482          len != 0;
2483          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2484         PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2485 
2486         /* If the write protection bit is set, then we invalidate
2487            the code inside.  */
2488         if (!(p->flags & PAGE_WRITE) &&
2489             (flags & PAGE_WRITE) &&
2490             p->first_tb) {
2491             tb_invalidate_phys_page(addr, 0);
2492         }
2493         p->flags = flags;
2494     }
2495 }
2496 
2497 int page_check_range(target_ulong start, target_ulong len, int flags)
2498 {
2499     PageDesc *p;
2500     target_ulong end;
2501     target_ulong addr;
2502 
2503     /* This function should never be called with addresses outside the
2504        guest address space.  If this assert fires, it probably indicates
2505        a missing call to h2g_valid.  */
2506 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2507     assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2508 #endif
2509 
2510     if (len == 0) {
2511         return 0;
2512     }
2513     if (start + len - 1 < start) {
2514         /* We've wrapped around.  */
2515         return -1;
2516     }
2517 
2518     /* must do before we loose bits in the next step */
2519     end = TARGET_PAGE_ALIGN(start + len);
2520     start = start & TARGET_PAGE_MASK;
2521 
2522     for (addr = start, len = end - start;
2523          len != 0;
2524          len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2525         p = page_find(addr >> TARGET_PAGE_BITS);
2526         if (!p) {
2527             return -1;
2528         }
2529         if (!(p->flags & PAGE_VALID)) {
2530             return -1;
2531         }
2532 
2533         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2534             return -1;
2535         }
2536         if (flags & PAGE_WRITE) {
2537             if (!(p->flags & PAGE_WRITE_ORG)) {
2538                 return -1;
2539             }
2540             /* unprotect the page if it was put read-only because it
2541                contains translated code */
2542             if (!(p->flags & PAGE_WRITE)) {
2543                 if (!page_unprotect(addr, 0)) {
2544                     return -1;
2545                 }
2546             }
2547         }
2548     }
2549     return 0;
2550 }
2551 
2552 /* called from signal handler: invalidate the code and unprotect the
2553  * page. Return 0 if the fault was not handled, 1 if it was handled,
2554  * and 2 if it was handled but the caller must cause the TB to be
2555  * immediately exited. (We can only return 2 if the 'pc' argument is
2556  * non-zero.)
2557  */
2558 int page_unprotect(target_ulong address, uintptr_t pc)
2559 {
2560     unsigned int prot;
2561     bool current_tb_invalidated;
2562     PageDesc *p;
2563     target_ulong host_start, host_end, addr;
2564 
2565     /* Technically this isn't safe inside a signal handler.  However we
2566        know this only ever happens in a synchronous SEGV handler, so in
2567        practice it seems to be ok.  */
2568     mmap_lock();
2569 
2570     p = page_find(address >> TARGET_PAGE_BITS);
2571     if (!p) {
2572         mmap_unlock();
2573         return 0;
2574     }
2575 
2576     /* if the page was really writable, then we change its
2577        protection back to writable */
2578     if (p->flags & PAGE_WRITE_ORG) {
2579         current_tb_invalidated = false;
2580         if (p->flags & PAGE_WRITE) {
2581             /* If the page is actually marked WRITE then assume this is because
2582              * this thread raced with another one which got here first and
2583              * set the page to PAGE_WRITE and did the TB invalidate for us.
2584              */
2585 #ifdef TARGET_HAS_PRECISE_SMC
2586             TranslationBlock *current_tb = tcg_tb_lookup(pc);
2587             if (current_tb) {
2588                 current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
2589             }
2590 #endif
2591         } else {
2592             host_start = address & qemu_host_page_mask;
2593             host_end = host_start + qemu_host_page_size;
2594 
2595             prot = 0;
2596             for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2597                 p = page_find(addr >> TARGET_PAGE_BITS);
2598                 p->flags |= PAGE_WRITE;
2599                 prot |= p->flags;
2600 
2601                 /* and since the content will be modified, we must invalidate
2602                    the corresponding translated code. */
2603                 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
2604 #ifdef CONFIG_USER_ONLY
2605                 if (DEBUG_TB_CHECK_GATE) {
2606                     tb_invalidate_check(addr);
2607                 }
2608 #endif
2609             }
2610             mprotect((void *)g2h(host_start), qemu_host_page_size,
2611                      prot & PAGE_BITS);
2612         }
2613         mmap_unlock();
2614         /* If current TB was invalidated return to main loop */
2615         return current_tb_invalidated ? 2 : 1;
2616     }
2617     mmap_unlock();
2618     return 0;
2619 }
2620 #endif /* CONFIG_USER_ONLY */
2621 
2622 /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
2623 void tcg_flush_softmmu_tlb(CPUState *cs)
2624 {
2625 #ifdef CONFIG_SOFTMMU
2626     tlb_flush(cs);
2627 #endif
2628 }
2629