/*
 * Internal execution defines for qemu (target specific)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_INTERNAL_TARGET_H
#define ACCEL_TCG_INTERNAL_TARGET_H

#include "exec/exec-all.h"
#include "exec/translate-all.h"

/*
 * Access to the various translation structures needs to be serialised
 * via locks for consistency. In user-mode emulation, access to the
 * memory-related structures is protected with the mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_USER_ONLY
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#else
#define assert_memory_lock()
#endif
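
/*
 * Illustrative sketch (an assumption, not code from this file): a
 * caller that mutates the translation structures is expected to hold
 * the memory lock, so a hypothetical user-mode helper might look like:
 *
 *     void my_update_tb_tables(void)   // hypothetical helper name
 *     {
 *         mmap_lock();                 // user-mode memory lock
 *         assert_memory_lock();        // asserts only in user mode
 *         // ... modify translation structures ...
 *         mmap_unlock();
 *     }
 */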

#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif

#ifdef CONFIG_USER_ONLY
static inline void page_table_config_init(void) { }
#else
void page_table_config_init(void);
#endif

#ifdef CONFIG_USER_ONLY
/*
 * For user-only, page_protect sets the page read-only.
 * Since most execution is already on read-only pages, and we'd need to
 * account for other TBs on the same page, defer undoing any page protection
 * until we receive the write fault.
 */
static inline void tb_lock_page0(tb_page_addr_t p0)
{
    page_protect(p0);
}

static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
    page_protect(p1);
}

static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
void tb_lock_page0(tb_page_addr_t);
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif
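
/*
 * Illustrative flow (a simplified sketch, not code from this file):
 * when a TB is generated its first guest page is locked; in user-only
 * mode that makes the page read-only until a write fault arrives:
 *
 *     tb_page_addr_t p0 = tb_page_addr0(tb);
 *     tb_lock_page0(p0);   // user-only: page_protect(p0)
 *     // A later guest store to p0 faults; the fault handler then
 *     // removes the protection and invalidates the TBs on that page.
 */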

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */

bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);

/* Return the current PC from CPU, which may be cached in TB. */
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{
    if (tb_cflags(tb) & CF_PCREL) {
        return cpu->cc->get_pc(cpu);
    } else {
        return tb->pc;
    }
}
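
/*
 * Example use (illustrative): when logging an execution event, the
 * guest PC can be recovered whether or not the TB is PC-relative:
 *
 *     qemu_log_mask(CPU_LOG_EXEC, "pc=0x%" VADDR_PRIx "\n",
 *                   log_pc(cpu, tb));
 */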

/**
 * tcg_req_mo:
 * @type: TCGBar
 *
 * Filter @type to the barrier that is required for the guest
 * memory ordering vs the host memory ordering. A non-zero
 * result indicates that some barrier is required.
 *
 * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
 * guest requires strict ordering.
 *
 * This is a macro so that it's constant even without optimization.
 */
#ifdef TCG_GUEST_DEFAULT_MO
# define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#else
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
#endif
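
/*
 * Worked example (the constants shown are assumptions about typical
 * definitions, not taken from this file): if both guest and host are
 * x86-like, with TCG_GUEST_DEFAULT_MO and TCG_TARGET_DEFAULT_MO both
 * (TCG_MO_ALL & ~TCG_MO_ST_LD), then:
 *
 *     tcg_req_mo(TCG_MO_ALL)
 *         == TCG_MO_ALL & (TCG_MO_ALL & ~TCG_MO_ST_LD)
 *                       & ~(TCG_MO_ALL & ~TCG_MO_ST_LD)
 *         == 0                        // host already orders enough
 *
 * On a weakly ordered host with TCG_TARGET_DEFAULT_MO == 0, the same
 * call yields TCG_MO_ALL & ~TCG_MO_ST_LD, so a barrier is required.
 */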

/**
 * cpu_req_mo:
 * @type: TCGBar
 *
 * If tcg_req_mo indicates a barrier for @type is required
 * for the guest memory model, issue a host memory barrier.
 */
#define cpu_req_mo(type)          \
    do {                          \
        if (tcg_req_mo(type)) {   \
            smp_mb();             \
        }                         \
    } while (0)
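
/*
 * Example use (an illustrative sketch of how a memory-access helper
 * might be structured, not a definition from this file): issue the
 * barrier the guest memory model requires before the access itself:
 *
 *     cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);  // order prior ops vs this load
 *     // ... perform the guest load ...
 */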

#endif /* ACCEL_TCG_INTERNAL_TARGET_H */