1 /*
2  *  Host code generation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu-common.h"
23 
24 #define NO_CPU_IO_DEFS
25 #include "cpu.h"
26 #include "trace.h"
27 #include "disas/disas.h"
28 #include "exec/exec-all.h"
29 #include "tcg/tcg.h"
30 #if defined(CONFIG_USER_ONLY)
31 #include "qemu.h"
32 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
33 #include <sys/param.h>
34 #if __FreeBSD_version >= 700104
35 #define HAVE_KINFO_GETVMMAP
36 #define sigqueue sigqueue_freebsd  /* avoid redefinition */
37 #include <sys/proc.h>
38 #include <machine/profile.h>
39 #define _KERNEL
40 #include <sys/user.h>
41 #undef _KERNEL
42 #undef sigqueue
43 #include <libutil.h>
44 #endif
45 #endif
46 #else
47 #include "exec/ram_addr.h"
48 #endif
49 
50 #include "exec/cputlb.h"
51 #include "exec/tb-hash.h"
52 #include "translate-all.h"
53 #include "qemu/bitmap.h"
54 #include "qemu/error-report.h"
55 #include "qemu/qemu-print.h"
56 #include "qemu/timer.h"
57 #include "qemu/main-loop.h"
58 #include "exec/log.h"
59 #include "sysemu/cpus.h"
60 #include "sysemu/tcg.h"
61 
62 /* #define DEBUG_TB_INVALIDATE */
63 /* #define DEBUG_TB_FLUSH */
64 /* make various TB consistency checks */
65 /* #define DEBUG_TB_CHECK */
66 
67 #ifdef DEBUG_TB_INVALIDATE
68 #define DEBUG_TB_INVALIDATE_GATE 1
69 #else
70 #define DEBUG_TB_INVALIDATE_GATE 0
71 #endif
72 
73 #ifdef DEBUG_TB_FLUSH
74 #define DEBUG_TB_FLUSH_GATE 1
75 #else
76 #define DEBUG_TB_FLUSH_GATE 0
77 #endif
78 
79 #if !defined(CONFIG_USER_ONLY)
80 /* TB consistency checks only implemented for usermode emulation.  */
81 #undef DEBUG_TB_CHECK
82 #endif
83 
84 #ifdef DEBUG_TB_CHECK
85 #define DEBUG_TB_CHECK_GATE 1
86 #else
87 #define DEBUG_TB_CHECK_GATE 0
88 #endif
89 
90 /* Access to the various translation structures needs to be serialised via locks
91  * for consistency.
92  * In user-mode emulation, access to the memory-related structures is protected
93  * by mmap_lock.
94  * In !user-mode we use per-page locks.
95  */
96 #ifdef CONFIG_SOFTMMU
97 #define assert_memory_lock()
98 #else
99 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
100 #endif
101 
102 #define SMC_BITMAP_USE_THRESHOLD 10
103 
104 typedef struct PageDesc {
105     /* list of TBs intersecting this ram page */
106     uintptr_t first_tb;
107 #ifdef CONFIG_SOFTMMU
108     /* in order to optimize self modifying code, we count the number of
109        code writes to a given page; past a threshold we switch to a bitmap */
110     unsigned long *code_bitmap;
111     unsigned int code_write_count;
112 #else
113     unsigned long flags;
114 #endif
115 #ifndef CONFIG_USER_ONLY
116     QemuSpin lock;
117 #endif
118 } PageDesc;
119 
120 /**
121  * struct page_entry - page descriptor entry
122  * @pd:     pointer to the &struct PageDesc of the page this entry represents
123  * @index:  page index of the page
124  * @locked: whether the page is locked
125  *
126  * This struct helps us keep track of the locked state of a page, without
127  * bloating &struct PageDesc.
128  *
129  * A page lock protects accesses to all fields of &struct PageDesc.
130  *
131  * See also: &struct page_collection.
132  */
133 struct page_entry {
134     PageDesc *pd;
135     tb_page_addr_t index;
136     bool locked;
137 };
138 
139 /**
140  * struct page_collection - tracks a set of pages (i.e. &struct page_entry's)
141  * @tree:   Binary search tree (BST) of the pages, with key == page index
142  * @max:    Pointer to the page in @tree with the highest page index
143  *
144  * To avoid deadlock we lock pages in ascending order of page index.
145  * When operating on a set of pages, we need to keep track of them so that
146  * we can lock them in order and also unlock them later. For this we collect
147  * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the
148  * @tree implementation we use does not provide an O(1) operation to obtain the
149  * highest-ranked element, we use @max to keep track of the inserted page
150  * with the highest index. This is valuable because if a page is not in
151  * the tree and its index is higher than @max's, then we can lock it
152  * without breaking the locking order rule.
153  *
154  * Note on naming: 'struct page_set' would be shorter, but we already have a few
155  * page_set_*() helpers, so page_collection is used instead to avoid confusion.
156  *
157  * See also: page_collection_lock().
158  */
159 struct page_collection {
160     GTree *tree;
161     struct page_entry *max;
162 };
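
/*
 * Illustrative usage sketch (not part of the build): callers are expected to
 * lock every page they might touch up front, do their work, then release the
 * whole set in one go, roughly:
 *
 *     struct page_collection *pages;
 *
 *     pages = page_collection_lock(start, end);
 *     ... walk and invalidate the TBs on the locked pages ...
 *     page_collection_unlock(pages);
 *
 * page_collection_lock() acquires the per-page locks in ascending page-index
 * order (retrying from scratch if an out-of-order trylock fails), so callers
 * never need to reason about lock ordering themselves.
 */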
163 
164 /* list iterators for lists of tagged pointers in TranslationBlock */
165 #define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
166     for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
167          tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
168              tb = (TranslationBlock *)((uintptr_t)tb & ~1))
169 
170 #define PAGE_FOR_EACH_TB(pagedesc, tb, n)                       \
171     TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
172 
173 #define TB_FOR_EACH_JMP(head_tb, tb, n)                                 \
174     TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
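
/*
 * Illustrative note (not part of the build): each pointer stored in these
 * lists carries a tag in bit 0 selecting which of the pointed-to TB's two
 * link slots (page_next[] / jmp_list_next[]) continues this particular list.
 * For example, walking all TBs that intersect a given PageDesc looks like:
 *
 *     TranslationBlock *tb;
 *     int n;
 *
 *     PAGE_FOR_EACH_TB(pd, tb, n) {
 *         // n says whether this is the TB's first or second page,
 *         // i.e. tb->page_addr[n] corresponds to this PageDesc
 *     }
 */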
175 
176 /* In system mode we want L1_MAP to be based on ram offsets,
177    while in user mode we want it to be based on virtual addresses.  */
178 #if !defined(CONFIG_USER_ONLY)
179 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
180 # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
181 #else
182 # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
183 #endif
184 #else
185 # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
186 #endif
187 
188 /* Size of the L2 (and L3, etc) page tables.  */
189 #define V_L2_BITS 10
190 #define V_L2_SIZE (1 << V_L2_BITS)
191 
192 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
193 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
194                   sizeof_field(TranslationBlock, trace_vcpu_dstate)
195                   * BITS_PER_BYTE);
196 
197 /*
198  * L1 Mapping properties
199  */
200 static int v_l1_size;
201 static int v_l1_shift;
202 static int v_l2_levels;
203 
204 /* The bottom level has pointers to PageDesc, and is indexed by
205  * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
206  */
207 #define V_L1_MIN_BITS 4
208 #define V_L1_MAX_BITS (V_L2_BITS + 3)
209 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
210 
211 static void *l1_map[V_L1_MAX_SIZE];
212 
213 /* code generation context */
214 TCGContext tcg_init_ctx;
215 __thread TCGContext *tcg_ctx;
216 TBContext tb_ctx;
217 bool parallel_cpus;
218 
219 static void page_table_config_init(void)
220 {
221     uint32_t v_l1_bits;
222 
223     assert(TARGET_PAGE_BITS);
224     /* The bits remaining after N lower levels of page tables.  */
225     v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
226     if (v_l1_bits < V_L1_MIN_BITS) {
227         v_l1_bits += V_L2_BITS;
228     }
229 
230     v_l1_size = 1 << v_l1_bits;
231     v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
232     v_l2_levels = v_l1_shift / V_L2_BITS - 1;
233 
234     assert(v_l1_bits <= V_L1_MAX_BITS);
235     assert(v_l1_shift % V_L2_BITS == 0);
236     assert(v_l2_levels >= 0);
237 }
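
/*
 * Worked example (illustrative, assuming L1_MAP_ADDR_SPACE_BITS == 36 and
 * TARGET_PAGE_BITS == 12): the 24 page-index bits get split as
 *
 *     v_l1_bits   = 24 % 10 = 4    (not below V_L1_MIN_BITS, so kept)
 *     v_l1_size   = 16
 *     v_l1_shift  = 24 - 4 = 20
 *     v_l2_levels = 20 / 10 - 1 = 1
 *
 * i.e. a 16-entry L1 table, one intermediate 1024-entry level, and a final
 * 1024-entry level of PageDesc: 4 + 10 + 10 = 24 bits in total.
 */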
238 
239 void cpu_gen_init(void)
240 {
241     tcg_context_init(&tcg_init_ctx);
242 }
243 
244 /* Encode VAL as a signed leb128 sequence at P.
245    Return P incremented past the encoded value.  */
246 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
247 {
248     int more, byte;
249 
250     do {
251         byte = val & 0x7f;
252         val >>= 7;
253         more = !((val == 0 && (byte & 0x40) == 0)
254                  || (val == -1 && (byte & 0x40) != 0));
255         if (more) {
256             byte |= 0x80;
257         }
258         *p++ = byte;
259     } while (more);
260 
261     return p;
262 }
263 
264 /* Decode a signed leb128 sequence at *PP; increment *PP past the
265    decoded value.  Return the decoded value.  */
266 static target_long decode_sleb128(uint8_t **pp)
267 {
268     uint8_t *p = *pp;
269     target_long val = 0;
270     int byte, shift = 0;
271 
272     do {
273         byte = *p++;
274         val |= (target_ulong)(byte & 0x7f) << shift;
275         shift += 7;
276     } while (byte & 0x80);
277     if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
278         val |= -(target_ulong)1 << shift;
279     }
280 
281     *pp = p;
282     return val;
283 }
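
/*
 * Round-trip example (illustrative, not part of the build): the value 300
 * encodes as the two bytes 0xac 0x02 and decodes back to 300:
 *
 *     uint8_t buf[8], *end, *p = buf;
 *     end = encode_sleb128(buf, 300);          // buf[0]=0xac, buf[1]=0x02
 *     assert(end - buf == 2);
 *     assert(decode_sleb128(&p) == 300 && p == end);
 *
 * Small negative deltas stay compact too: -5 encodes as the single byte
 * 0x7b, since bit 0x40 of the final byte supplies the sign.
 */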
284 
285 /* Encode the data collected about the instructions while compiling TB.
286    Place the data at BLOCK, and return the number of bytes consumed.
287 
288    The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
289    which come from the target's insn_start data, followed by a uintptr_t
290    which comes from the host pc of the end of the code implementing the insn.
291 
292    Each line of the table is encoded as sleb128 deltas from the previous
293    line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
294    That is, the first column is seeded with the guest pc, the last column
295    with the host pc, and the middle columns with zeros.  */
296 
297 static int encode_search(TranslationBlock *tb, uint8_t *block)
298 {
299     uint8_t *highwater = tcg_ctx->code_gen_highwater;
300     uint8_t *p = block;
301     int i, j, n;
302 
303     for (i = 0, n = tb->icount; i < n; ++i) {
304         target_ulong prev;
305 
306         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
307             if (i == 0) {
308                 prev = (j == 0 ? tb->pc : 0);
309             } else {
310                 prev = tcg_ctx->gen_insn_data[i - 1][j];
311             }
312             p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
313         }
314         prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
315         p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
316 
317         /* Test for (pending) buffer overflow.  The assumption is that any
318            one row beginning below the high water mark cannot overrun
319            the buffer completely.  Thus we can test for overflow after
320            encoding a row without having to check during encoding.  */
321         if (unlikely(p > highwater)) {
322             return -1;
323         }
324     }
325 
326     return p - block;
327 }
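
/*
 * Illustrative example of the resulting stream (not from the source): for a
 * TB with two guest insns at pc and pc + 4 whose generated code ends at
 * offsets 0x20 and 0x48, and with TARGET_INSN_START_WORDS == 1, the logical
 * table
 *
 *     { pc,     0x20 }
 *     { pc + 4, 0x48 }
 *
 * is stored as the sleb128 deltas 0, 0x20, 4, 0x28 (each row relative to the
 * previous one, the first row relative to the seed described above).
 * cpu_restore_state_from_tb() below simply re-adds deltas until the
 * accumulated host pc passes searched_pc.
 */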
328 
329 /* The cpu state corresponding to 'searched_pc' is restored.
330  * When reset_icount is true, current TB will be interrupted and
331  * icount should be recalculated.
332  */
333 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
334                                      uintptr_t searched_pc, bool reset_icount)
335 {
336     target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
337     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
338     CPUArchState *env = cpu->env_ptr;
339     uint8_t *p = tb->tc.ptr + tb->tc.size;
340     int i, j, num_insns = tb->icount;
341 #ifdef CONFIG_PROFILER
342     TCGProfile *prof = &tcg_ctx->prof;
343     int64_t ti = profile_getclock();
344 #endif
345 
346     searched_pc -= GETPC_ADJ;
347 
348     if (searched_pc < host_pc) {
349         return -1;
350     }
351 
352     /* Reconstruct the stored insn data while looking for the point at
353        which the end of the insn exceeds the searched_pc.  */
354     for (i = 0; i < num_insns; ++i) {
355         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
356             data[j] += decode_sleb128(&p);
357         }
358         host_pc += decode_sleb128(&p);
359         if (host_pc > searched_pc) {
360             goto found;
361         }
362     }
363     return -1;
364 
365  found:
366     if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
367         assert(use_icount);
368         /* Reset the cycle counter to the start of the block
369            and shift it to the number of actually executed instructions */
370         cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
371     }
372     restore_state_to_opc(env, tb, data);
373 
374 #ifdef CONFIG_PROFILER
375     atomic_set(&prof->restore_time,
376                 prof->restore_time + profile_getclock() - ti);
377     atomic_set(&prof->restore_count, prof->restore_count + 1);
378 #endif
379     return 0;
380 }
381 
382 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
383 {
384     TranslationBlock *tb;
385     bool r = false;
386     uintptr_t check_offset;
387 
388     /* The host_pc has to be in the region of current code buffer. If
389      * it is not we will not be able to resolve it here. The two cases
390      * where host_pc will not be correct are:
391      *
392      *  - fault during translation (instruction fetch)
393      *  - fault from helper (not using GETPC() macro)
394      *
395      * Either way we need to return early as we can't resolve it here.
396      *
397      * We are using unsigned arithmetic so if host_pc <
398      * tcg_init_ctx.code_gen_buffer, check_offset will wrap to way
399      * above code_gen_buffer_size
400      */
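    /*
     * For instance (illustrative numbers): with code_gen_buffer at
     * 0x7f0000000000 and a 1 GiB buffer, a host_pc of 0x400123 yields a
     * check_offset of 0xffff810000400123, far above code_gen_buffer_size,
     * so the lookup below is correctly skipped.
     */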
401     check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
402 
403     if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
404         tb = tcg_tb_lookup(host_pc);
405         if (tb) {
406             cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
407             if (tb_cflags(tb) & CF_NOCACHE) {
408                 /* one-shot translation, invalidate it immediately */
409                 tb_phys_invalidate(tb, -1);
410                 tcg_tb_remove(tb);
411             }
412             r = true;
413         }
414     }
415 
416     return r;
417 }
418 
419 static void page_init(void)
420 {
421     page_size_init();
422     page_table_config_init();
423 
424 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
425     {
426 #ifdef HAVE_KINFO_GETVMMAP
427         struct kinfo_vmentry *freep;
428         int i, cnt;
429 
430         freep = kinfo_getvmmap(getpid(), &cnt);
431         if (freep) {
432             mmap_lock();
433             for (i = 0; i < cnt; i++) {
434                 unsigned long startaddr, endaddr;
435 
436                 startaddr = freep[i].kve_start;
437                 endaddr = freep[i].kve_end;
438                 if (h2g_valid(startaddr)) {
439                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
440 
441                     if (h2g_valid(endaddr)) {
442                         endaddr = h2g(endaddr);
443                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
444                     } else {
445 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
446                         endaddr = ~0ul;
447                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
448 #endif
449                     }
450                 }
451             }
452             free(freep);
453             mmap_unlock();
454         }
455 #else
456         FILE *f;
457 
458         last_brk = (unsigned long)sbrk(0);
459 
460         f = fopen("/compat/linux/proc/self/maps", "r");
461         if (f) {
462             mmap_lock();
463 
464             do {
465                 unsigned long startaddr, endaddr;
466                 int n;
467 
468                 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
469 
470                 if (n == 2 && h2g_valid(startaddr)) {
471                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
472 
473                     if (h2g_valid(endaddr)) {
474                         endaddr = h2g(endaddr);
475                     } else {
476                         endaddr = ~0ul;
477                     }
478                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
479                 }
480             } while (!feof(f));
481 
482             fclose(f);
483             mmap_unlock();
484         }
485 #endif
486     }
487 #endif
488 }
489 
490 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
491 {
492     PageDesc *pd;
493     void **lp;
494     int i;
495 
496     /* Level 1.  Always allocated.  */
497     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
498 
499     /* Level 2..N-1.  */
500     for (i = v_l2_levels; i > 0; i--) {
501         void **p = atomic_rcu_read(lp);
502 
503         if (p == NULL) {
504             void *existing;
505 
506             if (!alloc) {
507                 return NULL;
508             }
509             p = g_new0(void *, V_L2_SIZE);
510             existing = atomic_cmpxchg(lp, NULL, p);
511             if (unlikely(existing)) {
512                 g_free(p);
513                 p = existing;
514             }
515         }
516 
517         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
518     }
519 
520     pd = atomic_rcu_read(lp);
521     if (pd == NULL) {
522         void *existing;
523 
524         if (!alloc) {
525             return NULL;
526         }
527         pd = g_new0(PageDesc, V_L2_SIZE);
528 #ifndef CONFIG_USER_ONLY
529         {
530             int i;
531 
532             for (i = 0; i < V_L2_SIZE; i++) {
533                 qemu_spin_init(&pd[i].lock);
534             }
535         }
536 #endif
537         existing = atomic_cmpxchg(lp, NULL, pd);
538         if (unlikely(existing)) {
539             g_free(pd);
540             pd = existing;
541         }
542     }
543 
544     return pd + (index & (V_L2_SIZE - 1));
545 }
546 
547 static inline PageDesc *page_find(tb_page_addr_t index)
548 {
549     return page_find_alloc(index, 0);
550 }
551 
552 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
553                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
554 
555 /* In user-mode page locks aren't used; mmap_lock is enough */
556 #ifdef CONFIG_USER_ONLY
557 
558 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
559 
560 static inline void page_lock(PageDesc *pd)
561 { }
562 
563 static inline void page_unlock(PageDesc *pd)
564 { }
565 
566 static inline void page_lock_tb(const TranslationBlock *tb)
567 { }
568 
569 static inline void page_unlock_tb(const TranslationBlock *tb)
570 { }
571 
572 struct page_collection *
573 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
574 {
575     return NULL;
576 }
577 
578 void page_collection_unlock(struct page_collection *set)
579 { }
580 #else /* !CONFIG_USER_ONLY */
581 
582 #ifdef CONFIG_DEBUG_TCG
583 
584 static __thread GHashTable *ht_pages_locked_debug;
585 
586 static void ht_pages_locked_debug_init(void)
587 {
588     if (ht_pages_locked_debug) {
589         return;
590     }
591     ht_pages_locked_debug = g_hash_table_new(NULL, NULL);
592 }
593 
594 static bool page_is_locked(const PageDesc *pd)
595 {
596     PageDesc *found;
597 
598     ht_pages_locked_debug_init();
599     found = g_hash_table_lookup(ht_pages_locked_debug, pd);
600     return !!found;
601 }
602 
603 static void page_lock__debug(PageDesc *pd)
604 {
605     ht_pages_locked_debug_init();
606     g_assert(!page_is_locked(pd));
607     g_hash_table_insert(ht_pages_locked_debug, pd, pd);
608 }
609 
610 static void page_unlock__debug(const PageDesc *pd)
611 {
612     bool removed;
613 
614     ht_pages_locked_debug_init();
615     g_assert(page_is_locked(pd));
616     removed = g_hash_table_remove(ht_pages_locked_debug, pd);
617     g_assert(removed);
618 }
619 
620 static void
621 do_assert_page_locked(const PageDesc *pd, const char *file, int line)
622 {
623     if (unlikely(!page_is_locked(pd))) {
624         error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
625                      pd, file, line);
626         abort();
627     }
628 }
629 
630 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
631 
632 void assert_no_pages_locked(void)
633 {
634     ht_pages_locked_debug_init();
635     g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
636 }
637 
638 #else /* !CONFIG_DEBUG_TCG */
639 
640 #define assert_page_locked(pd)
641 
642 static inline void page_lock__debug(const PageDesc *pd)
643 {
644 }
645 
646 static inline void page_unlock__debug(const PageDesc *pd)
647 {
648 }
649 
650 #endif /* CONFIG_DEBUG_TCG */
651 
652 static inline void page_lock(PageDesc *pd)
653 {
654     page_lock__debug(pd);
655     qemu_spin_lock(&pd->lock);
656 }
657 
658 static inline void page_unlock(PageDesc *pd)
659 {
660     qemu_spin_unlock(&pd->lock);
661     page_unlock__debug(pd);
662 }
663 
664 /* lock the page(s) of a TB in the correct acquisition order */
665 static inline void page_lock_tb(const TranslationBlock *tb)
666 {
667     page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
668 }
669 
670 static inline void page_unlock_tb(const TranslationBlock *tb)
671 {
672     PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
673 
674     page_unlock(p1);
675     if (unlikely(tb->page_addr[1] != -1)) {
676         PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
677 
678         if (p2 != p1) {
679             page_unlock(p2);
680         }
681     }
682 }
683 
684 static inline struct page_entry *
685 page_entry_new(PageDesc *pd, tb_page_addr_t index)
686 {
687     struct page_entry *pe = g_malloc(sizeof(*pe));
688 
689     pe->index = index;
690     pe->pd = pd;
691     pe->locked = false;
692     return pe;
693 }
694 
695 static void page_entry_destroy(gpointer p)
696 {
697     struct page_entry *pe = p;
698 
699     g_assert(pe->locked);
700     page_unlock(pe->pd);
701     g_free(pe);
702 }
703 
704 /* returns false on success */
705 static bool page_entry_trylock(struct page_entry *pe)
706 {
707     bool busy;
708 
709     busy = qemu_spin_trylock(&pe->pd->lock);
710     if (!busy) {
711         g_assert(!pe->locked);
712         pe->locked = true;
713         page_lock__debug(pe->pd);
714     }
715     return busy;
716 }
717 
718 static void do_page_entry_lock(struct page_entry *pe)
719 {
720     page_lock(pe->pd);
721     g_assert(!pe->locked);
722     pe->locked = true;
723 }
724 
725 static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data)
726 {
727     struct page_entry *pe = value;
728 
729     do_page_entry_lock(pe);
730     return FALSE;
731 }
732 
733 static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data)
734 {
735     struct page_entry *pe = value;
736 
737     if (pe->locked) {
738         pe->locked = false;
739         page_unlock(pe->pd);
740     }
741     return FALSE;
742 }
743 
744 /*
745  * Trylock a page, and if successful, add the page to a collection.
746  * Returns true ("busy") if the page could not be locked; false otherwise.
747  */
748 static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
749 {
750     tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
751     struct page_entry *pe;
752     PageDesc *pd;
753 
754     pe = g_tree_lookup(set->tree, &index);
755     if (pe) {
756         return false;
757     }
758 
759     pd = page_find(index);
760     if (pd == NULL) {
761         return false;
762     }
763 
764     pe = page_entry_new(pd, index);
765     g_tree_insert(set->tree, &pe->index, pe);
766 
767     /*
768      * If this is either (1) the first insertion or (2) a page whose index
769      * is higher than any other so far, just lock the page and move on.
770      */
771     if (set->max == NULL || pe->index > set->max->index) {
772         set->max = pe;
773         do_page_entry_lock(pe);
774         return false;
775     }
776     /*
777      * Try to acquire out-of-order lock; if busy, return busy so that we acquire
778      * locks in order.
779      */
780     return page_entry_trylock(pe);
781 }
782 
783 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
784 {
785     tb_page_addr_t a = *(const tb_page_addr_t *)ap;
786     tb_page_addr_t b = *(const tb_page_addr_t *)bp;
787 
788     if (a == b) {
789         return 0;
790     } else if (a < b) {
791         return -1;
792     }
793     return 1;
794 }
795 
796 /*
797  * Lock a range of pages ([@start,@end[) as well as the pages of all
798  * intersecting TBs.
799  * Locking order: acquire locks in ascending order of page index.
800  */
801 struct page_collection *
802 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
803 {
804     struct page_collection *set = g_malloc(sizeof(*set));
805     tb_page_addr_t index;
806     PageDesc *pd;
807 
808     start >>= TARGET_PAGE_BITS;
809     end   >>= TARGET_PAGE_BITS;
810     g_assert(start <= end);
811 
812     set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
813                                 page_entry_destroy);
814     set->max = NULL;
815     assert_no_pages_locked();
816 
817  retry:
818     g_tree_foreach(set->tree, page_entry_lock, NULL);
819 
820     for (index = start; index <= end; index++) {
821         TranslationBlock *tb;
822         int n;
823 
824         pd = page_find(index);
825         if (pd == NULL) {
826             continue;
827         }
828         if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
829             g_tree_foreach(set->tree, page_entry_unlock, NULL);
830             goto retry;
831         }
832         assert_page_locked(pd);
833         PAGE_FOR_EACH_TB(pd, tb, n) {
834             if (page_trylock_add(set, tb->page_addr[0]) ||
835                 (tb->page_addr[1] != -1 &&
836                  page_trylock_add(set, tb->page_addr[1]))) {
837                 /* drop all locks, and reacquire in order */
838                 g_tree_foreach(set->tree, page_entry_unlock, NULL);
839                 goto retry;
840             }
841         }
842     }
843     return set;
844 }
845 
846 void page_collection_unlock(struct page_collection *set)
847 {
848     /* entries are unlocked and freed via page_entry_destroy */
849     g_tree_destroy(set->tree);
850     g_free(set);
851 }
852 
853 #endif /* !CONFIG_USER_ONLY */
854 
855 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
856                            PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
857 {
858     PageDesc *p1, *p2;
859     tb_page_addr_t page1;
860     tb_page_addr_t page2;
861 
862     assert_memory_lock();
863     g_assert(phys1 != -1);
864 
865     page1 = phys1 >> TARGET_PAGE_BITS;
866     page2 = phys2 >> TARGET_PAGE_BITS;
867 
868     p1 = page_find_alloc(page1, alloc);
869     if (ret_p1) {
870         *ret_p1 = p1;
871     }
872     if (likely(phys2 == -1)) {
873         page_lock(p1);
874         return;
875     } else if (page1 == page2) {
876         page_lock(p1);
877         if (ret_p2) {
878             *ret_p2 = p1;
879         }
880         return;
881     }
882     p2 = page_find_alloc(page2, alloc);
883     if (ret_p2) {
884         *ret_p2 = p2;
885     }
886     if (page1 < page2) {
887         page_lock(p1);
888         page_lock(p2);
889     } else {
890         page_lock(p2);
891         page_lock(p1);
892     }
893 }
894 
895 /* Minimum size of the code gen buffer.  This number is randomly chosen,
896    but not so small that we can't have a fair number of TB's live.  */
897 #define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)
898 
899 /* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
900    indicated, this is constrained by the range of direct branches on the
901    host cpu, as used by the TCG implementation of goto_tb.  */
902 #if defined(__x86_64__)
903 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
904 #elif defined(__sparc__)
905 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
906 #elif defined(__powerpc64__)
907 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
908 #elif defined(__powerpc__)
909 # define MAX_CODE_GEN_BUFFER_SIZE  (32 * MiB)
910 #elif defined(__aarch64__)
911 # define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
912 #elif defined(__s390x__)
913   /* We have a +- 4GB range on the branches; leave some slop.  */
914 # define MAX_CODE_GEN_BUFFER_SIZE  (3 * GiB)
915 #elif defined(__mips__)
916   /* We have a 256MB branch region, but leave room to make sure the
917      main executable is also within that region.  */
918 # define MAX_CODE_GEN_BUFFER_SIZE  (128 * MiB)
919 #else
920 # define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
921 #endif
922 
923 #if TCG_TARGET_REG_BITS == 32
924 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
925 #ifdef CONFIG_USER_ONLY
926 /*
927  * For user mode on smaller 32 bit systems we may run into trouble
928  * allocating big chunks of data in the right place. On these systems
929  * we utilise a static code generation buffer directly in the binary.
930  */
931 #define USE_STATIC_CODE_GEN_BUFFER
932 #endif
933 #else /* TCG_TARGET_REG_BITS == 64 */
934 #ifdef CONFIG_USER_ONLY
935 /*
936  * As user-mode emulation typically means running multiple instances
937  * of the translator, don't go too nuts with our default code gen
938  * buffer lest we make things too hard for the OS.
939  */
940 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
941 #else
942 /*
943  * We expect most system emulation to run one or two guests per host.
944  * Users running large scale system emulation may want to tweak their
945  * runtime setup via the tb-size control on the command line.
946  */
947 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
948 #endif
949 #endif
950 
951 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
952   (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
953    ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
954 
955 static inline size_t size_code_gen_buffer(size_t tb_size)
956 {
957     /* Size the buffer.  */
958     if (tb_size == 0) {
959         tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
960     }
961     if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
962         tb_size = MIN_CODE_GEN_BUFFER_SIZE;
963     }
964     if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
965         tb_size = MAX_CODE_GEN_BUFFER_SIZE;
966     }
967     return tb_size;
968 }
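
/*
 * Examples (illustrative): on a 64-bit system-mode build,
 * size_code_gen_buffer(0) returns the 1 GiB default; a request of 16 KiB is
 * rounded up to MIN_CODE_GEN_BUFFER_SIZE (1 MiB); and a request of 8 GiB on
 * x86-64 is clamped to the 2 GiB MAX_CODE_GEN_BUFFER_SIZE imposed by the
 * reach of direct branches.
 */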
969 
970 #ifdef __mips__
971 /* In order to use J and JAL within the code_gen_buffer, we require
972    that the buffer not cross a 256MB boundary.  */
973 static inline bool cross_256mb(void *addr, size_t size)
974 {
975     return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
976 }
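
/*
 * For example (illustrative values): addr = 0x0ffff000, size = 0x2000 gives
 * addr ^ (addr + size) = 0x0ffff000 ^ 0x10001000 = 0x1fffe000; masking with
 * ~0x0ffffffful leaves 0x10000000, so the buffer straddles the 256MB
 * boundary at 0x10000000 and cross_256mb() returns true.
 */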
977 
978 /* We weren't able to allocate a buffer without crossing that boundary,
979    so make do with the larger portion of the buffer that doesn't cross.
980    Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
981 static inline void *split_cross_256mb(void *buf1, size_t size1)
982 {
983     void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
984     size_t size2 = buf1 + size1 - buf2;
985 
986     size1 = buf2 - buf1;
987     if (size1 < size2) {
988         size1 = size2;
989         buf1 = buf2;
990     }
991 
992     tcg_ctx->code_gen_buffer_size = size1;
993     return buf1;
994 }
995 #endif
996 
997 #ifdef USE_STATIC_CODE_GEN_BUFFER
998 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
999     __attribute__((aligned(CODE_GEN_ALIGN)));
1000 
1001 static inline void *alloc_code_gen_buffer(void)
1002 {
1003     void *buf = static_code_gen_buffer;
1004     void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
1005     size_t size;
1006 
1007     /* page-align the beginning and end of the buffer */
1008     buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
1009     end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
1010 
1011     size = end - buf;
1012 
1013     /* Honor a command-line option limiting the size of the buffer.  */
1014     if (size > tcg_ctx->code_gen_buffer_size) {
1015         size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
1016                                qemu_real_host_page_size);
1017     }
1018     tcg_ctx->code_gen_buffer_size = size;
1019 
1020 #ifdef __mips__
1021     if (cross_256mb(buf, size)) {
1022         buf = split_cross_256mb(buf, size);
1023         size = tcg_ctx->code_gen_buffer_size;
1024     }
1025 #endif
1026 
1027     if (qemu_mprotect_rwx(buf, size)) {
1028         abort();
1029     }
1030     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1031 
1032     return buf;
1033 }
1034 #elif defined(_WIN32)
1035 static inline void *alloc_code_gen_buffer(void)
1036 {
1037     size_t size = tcg_ctx->code_gen_buffer_size;
1038     return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
1039                         PAGE_EXECUTE_READWRITE);
1040 }
1041 #else
1042 static inline void *alloc_code_gen_buffer(void)
1043 {
1044     int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
1045     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
1046     uintptr_t start = 0;
1047     size_t size = tcg_ctx->code_gen_buffer_size;
1048     void *buf;
1049 
1050     /* Constrain the position of the buffer based on the host cpu.
1051        Note that these addresses are chosen in concert with the
1052        addresses assigned in the relevant linker script file.  */
1053 # if defined(__PIE__) || defined(__PIC__)
1054     /* Don't bother setting a preferred location if we're building
1055        a position-independent executable.  We're more likely to get
1056        an address near the main executable if we let the kernel
1057        choose the address.  */
1058 # elif defined(__x86_64__) && defined(MAP_32BIT)
1059     /* Force the memory down into low memory with the executable.
1060        Leave the choice of exact location with the kernel.  */
1061     flags |= MAP_32BIT;
1062     /* Cannot expect to map more than 800MB in low memory.  */
1063     if (size > 800u * 1024 * 1024) {
1064         tcg_ctx->code_gen_buffer_size = size = 800u * 1024 * 1024;
1065     }
1066 # elif defined(__sparc__)
1067     start = 0x40000000ul;
1068 # elif defined(__s390x__)
1069     start = 0x90000000ul;
1070 # elif defined(__mips__)
1071 #  if _MIPS_SIM == _ABI64
1072     start = 0x128000000ul;
1073 #  else
1074     start = 0x08000000ul;
1075 #  endif
1076 # endif
1077 
1078     buf = mmap((void *)start, size, prot, flags, -1, 0);
1079     if (buf == MAP_FAILED) {
1080         return NULL;
1081     }
1082 
1083 #ifdef __mips__
1084     if (cross_256mb(buf, size)) {
1085         /* Try again, with the original still mapped, to avoid re-acquiring
1086            that 256mb crossing.  This time don't specify an address.  */
1087         size_t size2;
1088         void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
1089         switch ((int)(buf2 != MAP_FAILED)) {
1090         case 1:
1091             if (!cross_256mb(buf2, size)) {
1092                 /* Success!  Use the new buffer.  */
1093                 munmap(buf, size);
1094                 break;
1095             }
1096             /* Failure.  Work with what we had.  */
1097             munmap(buf2, size);
1098             /* fallthru */
1099         default:
1100             /* Split the original buffer.  Free the smaller half.  */
1101             buf2 = split_cross_256mb(buf, size);
1102             size2 = tcg_ctx->code_gen_buffer_size;
1103             if (buf == buf2) {
1104                 munmap(buf + size2, size - size2);
1105             } else {
1106                 munmap(buf, size - size2);
1107             }
1108             size = size2;
1109             break;
1110         }
1111         buf = buf2;
1112     }
1113 #endif
1114 
1115     /* Request large pages for the buffer.  */
1116     qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
1117 
1118     return buf;
1119 }
1120 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
1121 
1122 static inline void code_gen_alloc(size_t tb_size)
1123 {
1124     tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
1125     tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
1126     if (tcg_ctx->code_gen_buffer == NULL) {
1127         fprintf(stderr, "Could not allocate dynamic translator buffer\n");
1128         exit(1);
1129     }
1130 }
1131 
1132 static bool tb_cmp(const void *ap, const void *bp)
1133 {
1134     const TranslationBlock *a = ap;
1135     const TranslationBlock *b = bp;
1136 
1137     return a->pc == b->pc &&
1138         a->cs_base == b->cs_base &&
1139         a->flags == b->flags &&
1140         (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
1141         a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1142         a->page_addr[0] == b->page_addr[0] &&
1143         a->page_addr[1] == b->page_addr[1];
1144 }
1145 
1146 static void tb_htable_init(void)
1147 {
1148     unsigned int mode = QHT_MODE_AUTO_RESIZE;
1149 
1150     qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
1151 }
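
/*
 * Illustrative sketch (hedged; not the real lookup helper): a lookup in
 * tb_ctx.htable is keyed by the same fields that tb_cmp() compares, along
 * the lines of
 *
 *     h = tb_hash_func(phys_pc, pc, flags, cflags & CF_HASH_MASK,
 *                      trace_vcpu_dstate);
 *     tb = qht_lookup(&tb_ctx.htable, &desc, h);
 *
 * where 'desc' carries the pc/cs_base/flags/cflags values the comparator
 * checks; see tb_htable_lookup() in cpu-exec.c for the actual implementation.
 */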
1152 
1153 /* Must be called before using the QEMU cpus. 'tb_size' is the size
1154    (in bytes) allocated to the translation buffer. Zero means default
1155    size. */
1156 void tcg_exec_init(unsigned long tb_size)
1157 {
1158     tcg_allowed = true;
1159     cpu_gen_init();
1160     page_init();
1161     tb_htable_init();
1162     code_gen_alloc(tb_size);
1163 #if defined(CONFIG_SOFTMMU)
1164     /* There's no guest base to take into account, so go ahead and
1165        initialize the prologue now.  */
1166     tcg_prologue_init(tcg_ctx);
1167 #endif
1168 }
1169 
1170 /* call with @p->lock held */
1171 static inline void invalidate_page_bitmap(PageDesc *p)
1172 {
1173     assert_page_locked(p);
1174 #ifdef CONFIG_SOFTMMU
1175     g_free(p->code_bitmap);
1176     p->code_bitmap = NULL;
1177     p->code_write_count = 0;
1178 #endif
1179 }
1180 
1181 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
1182 static void page_flush_tb_1(int level, void **lp)
1183 {
1184     int i;
1185 
1186     if (*lp == NULL) {
1187         return;
1188     }
1189     if (level == 0) {
1190         PageDesc *pd = *lp;
1191 
1192         for (i = 0; i < V_L2_SIZE; ++i) {
1193             page_lock(&pd[i]);
1194             pd[i].first_tb = (uintptr_t)NULL;
1195             invalidate_page_bitmap(pd + i);
1196             page_unlock(&pd[i]);
1197         }
1198     } else {
1199         void **pp = *lp;
1200 
1201         for (i = 0; i < V_L2_SIZE; ++i) {
1202             page_flush_tb_1(level - 1, pp + i);
1203         }
1204     }
1205 }
1206 
1207 static void page_flush_tb(void)
1208 {
1209     int i, l1_sz = v_l1_size;
1210 
1211     for (i = 0; i < l1_sz; i++) {
1212         page_flush_tb_1(v_l2_levels, l1_map + i);
1213     }
1214 }
1215 
1216 static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
1217 {
1218     const TranslationBlock *tb = value;
1219     size_t *size = data;
1220 
1221     *size += tb->tc.size;
1222     return false;
1223 }
1224 
1225 void flush_tcg_on_log_instr_chage(void);
1226 void flush_tcg_on_log_instr_chage(void) {
1227     warn_report("Calling real %s\r", __func__);
1228     CPUState *cpu;
1229     int cpu_index = 0;
1230     CPU_FOREACH(cpu) {
1231         warn_report("Flushing TCG for CPU %d\r", cpu_index++);
1232         tb_flush(cpu);
1233     }
1234 }
1235 
1236 /* flush all the translation blocks */
1237 static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
1238 {
1239     bool did_flush = false;
1240 
1241     mmap_lock();
1242     /* If it has already been done on request of another CPU,
1243      * just retry.
1244      */
1245     if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
1246         goto done;
1247     }
1248     did_flush = true;
1249 
1250     if (DEBUG_TB_FLUSH_GATE) {
1251         size_t nb_tbs = tcg_nb_tbs();
1252         size_t host_size = 0;
1253 
1254         tcg_tb_foreach(tb_host_size_iter, &host_size);
1255         printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
1256                tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
1257     }
1258 
1259     CPU_FOREACH(cpu) {
1260         cpu_tb_jmp_cache_clear(cpu);
1261     }
1262 
1263     qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1264     page_flush_tb();
1265 
1266     tcg_region_reset_all();
1267     /* XXX: flush processor icache at this point if cache flush is
1268        expensive */
1269     atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1270 
1271 done:
1272     mmap_unlock();
1273     if (did_flush) {
1274         qemu_plugin_flush_cb();
1275     }
1276 }
1277 
1278 void tb_flush(CPUState *cpu)
1279 {
1280     if (tcg_enabled()) {
1281         unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count);
1282 
1283         if (cpu_in_exclusive_context(cpu)) {
1284             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
1285         } else {
1286             async_safe_run_on_cpu(cpu, do_tb_flush,
1287                                   RUN_ON_CPU_HOST_INT(tb_flush_count));
1288         }
1289     }
1290 }
1291 
1292 /*
1293  * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
1294  * so in order to prevent bit rot we compile them unconditionally in user-mode,
1295  * and let the optimizer get rid of them by wrapping their user-only callers
1296  * with if (DEBUG_TB_CHECK_GATE).
1297  */
1298 #ifdef CONFIG_USER_ONLY
1299 
1300 static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
1301 {
1302     TranslationBlock *tb = p;
1303     target_ulong addr = *(target_ulong *)userp;
1304 
1305     if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
1306         printf("ERROR invalidate: address=" TARGET_FMT_lx
1307                " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
1308     }
1309 }
1310 
1311 /* verify that all the pages have correct rights for code
1312  *
1313  * Called with mmap_lock held.
1314  */
1315 static void tb_invalidate_check(target_ulong address)
1316 {
1317     address &= TARGET_PAGE_MASK;
1318     qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
1319 }
1320 
1321 static void do_tb_page_check(void *p, uint32_t hash, void *userp)
1322 {
1323     TranslationBlock *tb = p;
1324     int flags1, flags2;
1325 
1326     flags1 = page_get_flags(tb->pc);
1327     flags2 = page_get_flags(tb->pc + tb->size - 1);
1328     if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
1329         printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
1330                (long)tb->pc, tb->size, flags1, flags2);
1331     }
1332 }
1333 
1334 /* verify that all the pages have correct rights for code */
1335 static void tb_page_check(void)
1336 {
1337     qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
1338 }
1339 
1340 #endif /* CONFIG_USER_ONLY */
1341 
1342 /*
1343  * user-mode: call with mmap_lock held
1344  * !user-mode: call with @pd->lock held
1345  */
1346 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1347 {
1348     TranslationBlock *tb1;
1349     uintptr_t *pprev;
1350     unsigned int n1;
1351 
1352     assert_page_locked(pd);
1353     pprev = &pd->first_tb;
1354     PAGE_FOR_EACH_TB(pd, tb1, n1) {
1355         if (tb1 == tb) {
1356             *pprev = tb1->page_next[n1];
1357             return;
1358         }
1359         pprev = &tb1->page_next[n1];
1360     }
1361     g_assert_not_reached();
1362 }
1363 
1364 /* remove @orig from its @n_orig-th jump list */
1365 static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
1366 {
1367     uintptr_t ptr, ptr_locked;
1368     TranslationBlock *dest;
1369     TranslationBlock *tb;
1370     uintptr_t *pprev;
1371     int n;
1372 
1373     /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1374     ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1375     dest = (TranslationBlock *)(ptr & ~1);
1376     if (dest == NULL) {
1377         return;
1378     }
1379 
1380     qemu_spin_lock(&dest->jmp_lock);
1381     /*
1382      * While acquiring the lock, the jump might have been removed if the
1383      * destination TB was invalidated; check again.
1384      */
1385     ptr_locked = atomic_read(&orig->jmp_dest[n_orig]);
1386     if (ptr_locked != ptr) {
1387         qemu_spin_unlock(&dest->jmp_lock);
1388         /*
1389          * The only possibility is that the jump was unlinked via
1390      * tb_jump_unlink(dest). Seeing another destination here would be a bug,
1391          * because we set the LSB above.
1392          */
1393         g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1394         return;
1395     }
1396     /*
1397      * We first acquired the lock, and since the destination pointer matches,
1398      * we know for sure that @orig is in the jmp list.
1399      */
1400     pprev = &dest->jmp_list_head;
1401     TB_FOR_EACH_JMP(dest, tb, n) {
1402         if (tb == orig && n == n_orig) {
1403             *pprev = tb->jmp_list_next[n];
1404             /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1405             qemu_spin_unlock(&dest->jmp_lock);
1406             return;
1407         }
1408         pprev = &tb->jmp_list_next[n];
1409     }
1410     g_assert_not_reached();
1411 }
1412 
1413 /* reset the jump entry 'n' of a TB so that it is not chained to
1414    another TB */
1415 static inline void tb_reset_jump(TranslationBlock *tb, int n)
1416 {
1417     uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1418     tb_set_jmp_target(tb, n, addr);
1419 }
1420 
1421 /* remove any jumps to the TB */
1422 static inline void tb_jmp_unlink(TranslationBlock *dest)
1423 {
1424     TranslationBlock *tb;
1425     int n;
1426 
1427     qemu_spin_lock(&dest->jmp_lock);
1428 
1429     TB_FOR_EACH_JMP(dest, tb, n) {
1430         tb_reset_jump(tb, n);
1431         atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1432         /* No need to clear the list entry; setting the dest ptr is enough */
1433     }
1434     dest->jmp_list_head = (uintptr_t)NULL;
1435 
1436     qemu_spin_unlock(&dest->jmp_lock);
1437 }
1438 
1439 /*
1440  * In user-mode, call with mmap_lock held.
1441  * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1442  * locks held.
1443  */
1444 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1445 {
1446     CPUState *cpu;
1447     PageDesc *p;
1448     uint32_t h;
1449     tb_page_addr_t phys_pc;
1450 
1451     assert_memory_lock();
1452 
1453     /* make sure no further incoming jumps will be chained to this TB */
1454     qemu_spin_lock(&tb->jmp_lock);
1455     atomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1456     qemu_spin_unlock(&tb->jmp_lock);
1457 
1458     /* remove the TB from the hash list */
1459     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1460     h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
1461                      tb->trace_vcpu_dstate);
1462     if (!(tb->cflags & CF_NOCACHE) &&
1463         !qht_remove(&tb_ctx.htable, tb, h)) {
1464         return;
1465     }
1466 
1467     /* remove the TB from the page list */
1468     if (rm_from_page_list) {
1469         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1470         tb_page_remove(p, tb);
1471         invalidate_page_bitmap(p);
1472         if (tb->page_addr[1] != -1) {
1473             p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1474             tb_page_remove(p, tb);
1475             invalidate_page_bitmap(p);
1476         }
1477     }
1478 
1479     /* remove the TB from the hash list */
1480     h = tb_jmp_cache_hash_func(tb->pc);
1481     CPU_FOREACH(cpu) {
1482         if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
1483             atomic_set(&cpu->tb_jmp_cache[h], NULL);
1484         }
1485     }
1486 
1487     /* suppress this TB from the two jump lists */
1488     tb_remove_from_jmp_list(tb, 0);
1489     tb_remove_from_jmp_list(tb, 1);
1490 
1491     /* suppress any remaining jumps to this TB */
1492     tb_jmp_unlink(tb);
1493 
1494     atomic_set(&tcg_ctx->tb_phys_invalidate_count,
1495                tcg_ctx->tb_phys_invalidate_count + 1);
1496 }
1497 
1498 static void tb_phys_invalidate__locked(TranslationBlock *tb)
1499 {
1500     do_tb_phys_invalidate(tb, true);
1501 }
1502 
1503 /* invalidate one TB
1504  *
1505  * Called with mmap_lock held in user-mode.
1506  */
1507 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1508 {
1509     if (page_addr == -1 && tb->page_addr[0] != -1) {
1510         page_lock_tb(tb);
1511         do_tb_phys_invalidate(tb, true);
1512         page_unlock_tb(tb);
1513     } else {
1514         do_tb_phys_invalidate(tb, false);
1515     }
1516 }
1517 
1518 #ifdef CONFIG_SOFTMMU
1519 /* call with @p->lock held */
1520 static void build_page_bitmap(PageDesc *p)
1521 {
1522     int n, tb_start, tb_end;
1523     TranslationBlock *tb;
1524 
1525     assert_page_locked(p);
1526     p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
1527 
1528     PAGE_FOR_EACH_TB(p, tb, n) {
1529         /* NOTE: this is subtle as a TB may span two physical pages */
1530         if (n == 0) {
1531             /* NOTE: tb_end may be after the end of the page, but
1532                it is not a problem */
1533             tb_start = tb->pc & ~TARGET_PAGE_MASK;
1534             tb_end = tb_start + tb->size;
1535             if (tb_end > TARGET_PAGE_SIZE) {
1536                 tb_end = TARGET_PAGE_SIZE;
1537             }
1538         } else {
1539             tb_start = 0;
1540             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1541         }
1542         bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
1543     }
1544 }
1545 #endif
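
/*
 * Worked example for build_page_bitmap() above (illustrative): a 0x30-byte
 * TB starting at page offset 0xff0 spans two pages.  On its first page
 * (n == 0) the marked range is [0xff0, TARGET_PAGE_SIZE); on its second page
 * (n == 1) it is [0, 0x20), i.e. up to (tb->pc + tb->size) & ~TARGET_PAGE_MASK.
 * The bitmap lets the SMC write-fault path see at a glance whether a guest
 * write actually hit translated code.
 */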
1546 
1547 /* add the tb in the target page and protect it if necessary
1548  *
1549  * Called with mmap_lock held for user-mode emulation.
1550  * Called with @p->lock held in !user-mode.
1551  */
1552 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1553                                unsigned int n, tb_page_addr_t page_addr)
1554 {
1555 #ifndef CONFIG_USER_ONLY
1556     bool page_already_protected;
1557 #endif
1558 
1559     assert_page_locked(p);
1560 
1561     tb->page_addr[n] = page_addr;
1562     tb->page_next[n] = p->first_tb;
1563 #ifndef CONFIG_USER_ONLY
1564     page_already_protected = p->first_tb != (uintptr_t)NULL;
1565 #endif
1566     p->first_tb = (uintptr_t)tb | n;
1567     invalidate_page_bitmap(p);
1568 
1569 #if defined(CONFIG_USER_ONLY)
1570     if (p->flags & PAGE_WRITE) {
1571         target_ulong addr;
1572         PageDesc *p2;
1573         int prot;
1574 
1575         /* force the host page as non writable (writes will have a
1576            page fault + mprotect overhead) */
1577         page_addr &= qemu_host_page_mask;
1578         prot = 0;
1579         for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1580             addr += TARGET_PAGE_SIZE) {
1581 
1582             p2 = page_find(addr >> TARGET_PAGE_BITS);
1583             if (!p2) {
1584                 continue;
1585             }
1586             prot |= p2->flags;
1587             p2->flags &= ~PAGE_WRITE;
1588         }
1589         mprotect(g2h(page_addr), qemu_host_page_size,
1590                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1591         if (DEBUG_TB_INVALIDATE_GATE) {
1592             printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
1593         }
1594     }
1595 #else
1596     /* if some code is already present, then the pages are already
1597        protected. So we handle the case where only the first TB is
1598        allocated in a physical page */
1599     if (!page_already_protected) {
1600         tlb_protect_code(page_addr);
1601     }
1602 #endif
1603 }
1604 
1605 /* add a new TB and link it to the physical page tables. phys_page2 is
1606  * (-1) to indicate that only one page contains the TB.
1607  *
1608  * Called with mmap_lock held for user-mode emulation.
1609  *
1610  * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1611  * Note that in !user-mode, another thread might have already added a TB
1612  * for the same block of guest code that @tb corresponds to. In that case,
1613  * the caller should discard the original @tb, and use instead the returned TB.
1614  */
1615 static TranslationBlock *
1616 tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1617              tb_page_addr_t phys_page2)
1618 {
1619     PageDesc *p;
1620     PageDesc *p2 = NULL;
1621 
1622     assert_memory_lock();
1623 
1624     if (phys_pc == -1) {
1625         /*
1626          * If the TB is not associated with a physical RAM page then
1627          * it must be a temporary one-insn TB, and we have nothing to do
1628          * except fill in the page_addr[] fields.
1629          */
1630         assert(tb->cflags & CF_NOCACHE);
1631         tb->page_addr[0] = tb->page_addr[1] = -1;
1632         return tb;
1633     }
1634 
1635     /*
1636      * Add the TB to the page list, acquiring first the pages's locks.
1637      * We keep the locks held until after inserting the TB in the hash table,
1638      * so that if the insertion fails we know for sure that the TBs are still
1639      * in the page descriptors.
1640      * Note that inserting into the hash table first isn't an option, since
1641      * we can only insert TBs that are fully initialized.
1642      */
1643     page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
1644     tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
1645     if (p2) {
1646         tb_page_add(p2, tb, 1, phys_page2);
1647     } else {
1648         tb->page_addr[1] = -1;
1649     }
1650 
1651     if (!(tb->cflags & CF_NOCACHE)) {
1652         void *existing_tb = NULL;
1653         uint32_t h;
1654 
1655         /* add in the hash table */
1656         h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
1657                          tb->trace_vcpu_dstate);
1658         qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1659 
1660         /* remove TB from the page(s) if we couldn't insert it */
1661         if (unlikely(existing_tb)) {
1662             tb_page_remove(p, tb);
1663             invalidate_page_bitmap(p);
1664             if (p2) {
1665                 tb_page_remove(p2, tb);
1666                 invalidate_page_bitmap(p2);
1667             }
1668             tb = existing_tb;
1669         }
1670     }
1671 
1672     if (p2 && p2 != p) {
1673         page_unlock(p2);
1674     }
1675     page_unlock(p);
1676 
1677 #ifdef CONFIG_USER_ONLY
1678     if (DEBUG_TB_CHECK_GATE) {
1679         tb_page_check();
1680     }
1681 #endif
1682     return tb;
1683 }

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb, *existing_tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size, max_insns;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti;
#endif

    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);

    if (phys_pc == -1) {
        /* Generate a temporary TB with 1 insn in it */
        cflags &= ~CF_COUNT_MASK;
        cflags |= CF_NOCACHE | 1;
    }

    cflags &= ~CF_CLUSTER_MASK;
    cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;

    max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    if (cpu->singlestep_enabled || singlestep) {
        max_insns = 1;
    }

 buffer_overflow:
    tb = tcg_tb_alloc(tcg_ctx);
    if (unlikely(!tb)) {
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible.  */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx->code_gen_ptr;
    tb->tc.ptr = gen_code_buf;
    tb->pc = pc;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    tb->orig_tb = NULL;
    tb->trace_vcpu_dstate = *cpu->trace_dstate;
    tcg_ctx->tb_cflags = cflags;
 tb_overflow:

#ifdef CONFIG_PROFILER
    /* includes aborted translations because of exceptions */
    atomic_set(&prof->tb_count1, prof->tb_count1 + 1);
    ti = profile_getclock();
#endif

    tcg_func_start(tcg_ctx);

    tcg_ctx->cpu = env_cpu(env);
    gen_intermediate_code(cpu, tb, max_insns);
    tcg_ctx->cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc.ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
    if (TCG_TARGET_HAS_direct_jump) {
        tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
        tcg_ctx->tb_jmp_target_addr = NULL;
    } else {
        tcg_ctx->tb_jmp_insn_offset = NULL;
        tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->tb_count, prof->tb_count + 1);
    atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti);
    ti = profile_getclock();
#endif

    gen_code_size = tcg_gen_code(tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        switch (gen_code_size) {
        case -1:
            /*
             * Overflow of code_gen_buffer, or the current slice of it.
             *
             * TODO: We don't need to re-do gen_intermediate_code, nor
             * should we re-do the tcg optimization currently hidden
             * inside tcg_gen_code.  All that should be required is to
             * flush the TBs, allocate a new TB, re-initialize it per
             * above, and re-do the actual code generation.
             */
            goto buffer_overflow;

        case -2:
            /*
             * The code generated for the TranslationBlock is too large.
             * The maximum size allowed by the unwind info is 64k.
             * There may be stricter constraints from relocations
             * in the tcg backend.
             *
             * Try again with half as many insns as we attempted this time.
             * If a single insn overflows, there's a bug somewhere...
             */
            max_insns = tb->icount;
            assert(max_insns > 1);
            max_insns /= 2;
            goto tb_overflow;

        default:
            g_assert_not_reached();
        }
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }
    tb->tc.size = gen_code_size;

#ifdef CONFIG_PROFILER
    atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
    atomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
    atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
    atomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        FILE *logfile = qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        if (tcg_ctx->data_gen_ptr) {
            size_t code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
            size_t data_size = gen_code_size - code_size;
            size_t i;

            log_disas(tb->tc.ptr, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)tcg_ctx->data_gen_ptr + i,
                             *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(tb->tc.ptr, gen_code_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock(logfile);
    }
#endif

    atomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        return existing_tb;
    }
    tcg_tb_insert(tb);
    return tb;
}

/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    bool current_tb_not_found = retaddr != 0;
    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = false;
                /* now we have a real cpu fault */
                current_tb = tcg_tb_lookup(retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution. We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages;
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (p == NULL) {
        return;
    }
    pages = page_collection_lock(start, end);
    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
    page_collection_unlock(pages);
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
#else
void tb_invalidate_phys_range(target_ulong start, target_ulong end)
#endif
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}

#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 *
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                  tb_page_addr_t start, int len,
                                  uintptr_t retaddr)
{
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }

    assert_page_locked(p);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        build_page_bitmap(p);
    }
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
                                              retaddr);
    }
}
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (p->first_tb && pc != 0) {
        current_tb = tcg_tb_lookup(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    assert_page_locked(p);
    PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
    }
    p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | curr_cflags();
        return true;
    }
#endif

    return false;
}
#endif

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC.  */
        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper.  The CPU state should
           have been saved before calling it. Fetch the PC from there.  */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr + 1);
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* in deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr, true);

    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
    n = 1;
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0
        && PC_ADDR(env) != tb->pc) {
#ifdef TARGET_CHERI
        env->active_tc.PCC._cr_cursor -=
#else
        env->active_tc.PC -=
#endif
            (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
        cpu_neg(cpu)->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
        n = 2;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && env->pc != tb->pc) {
        env->pc -= 2;
        cpu_neg(cpu)->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
        n = 2;
    }
#endif

    /* Generate a new TB executing the I/O insn.  */
    cpu->cflags_next_tb = curr_cflags() | CF_LAST_IO | n;

    if (tb_cflags(tb) & CF_NOCACHE) {
        if (tb->orig_tb) {
            /* Invalidate original TB if this TB was generated in
             * cpu_exec_nocache() */
            tb_phys_invalidate(tb->orig_tb, -1);
        }
        tcg_tb_remove(tb);
    }

    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
     * the first in the TB) then we end up generating a whole new TB and
     * repeating the fault, which is horribly inefficient.
     * Better would be to execute just this insn uncached, or generate a
     * second new TB.
     */
    cpu_loop_exit_noexc(cpu);
}

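/*
 * Clear the per-CPU jump cache entries that hash to the given guest page,
 * so that stale pointers to invalidated TBs cannot be looked up again.
 */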
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

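/*
 * Pretty-print QHT hash table statistics (bucket usage, occupancy and
 * chain-length histograms) as part of the dump_exec_info() report.
 */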
static void print_qht_statistics(struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    qemu_printf("TB hash buckets     %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    qemu_printf("TB hash occupancy   %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    qemu_printf("TB hash avg chain   %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

struct tb_tree_stats {
    size_t nb_tbs;
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

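/* Per-TB callback for tcg_tb_foreach(): accumulate statistics into a
   struct tb_tree_stats. */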
static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}

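/*
 * Print a summary of the translation cache state: code buffer usage, TB
 * counts and sizes, hash table statistics, and flush/invalidate counters.
 */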
void dump_exec_info(void)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    qemu_printf("Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    qemu_printf("gen code size       %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    qemu_printf("TB count            %zu\n", nb_tbs);
    qemu_printf("TB avg target size  %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    qemu_printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    qemu_printf("direct jump count   %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst);
    qht_statistics_destroy(&hst);

    qemu_printf("\nStatistics:\n");
    qemu_printf("TB flush count      %u\n",
                atomic_read(&tb_ctx.tb_flush_count));
    qemu_printf("TB invalidate count %zu\n",
                tcg_tb_phys_invalidate_count());

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    qemu_printf("TLB full flushes    %zu\n", flush_full);
    qemu_printf("TLB partial flushes %zu\n", flush_part);
    qemu_printf("TLB elided flushes  %zu\n", flush_elide);
    tcg_dump_info();
}

void dump_opcount_info(void)
{
    tcg_dump_op_count();
}

#else /* CONFIG_USER_ONLY */

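/*
 * user-mode implementation of cpu_interrupt(): record the pending request
 * and poke icount_decr so that cpu_exec() leaves the translated-code loop.
 */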
void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

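/*
 * Close the region currently being accumulated (if any) by reporting it to
 * the callback, then start a new region at @end with protection @new_prot.
 */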
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

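/*
 * Recursively walk one level of the l1_map radix tree for the address range
 * starting at @base, reporting protection changes via
 * walk_memory_regions_end().
 */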
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

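/* walk_memory_regions() callback used by page_dump(): print one region. */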
static int dump_region(void *priv, target_ulong start,
    target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
        " "TARGET_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

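/* Return the PAGE_* flags recorded for a guest virtual address, or 0 if no
   PageDesc has been allocated for that page. */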
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end <= ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        p->flags = flags;
    }
}

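/*
 * Check that every page in [start, start + len) is mapped with at least the
 * requested PAGE_* permissions; pages that are read-only only because they
 * contain translated code are unprotected on demand.  Returns 0 on success,
 * -1 on failure.
 */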
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do this before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /* If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}