xref: /qemu/include/exec/cpu-all.h (revision 16aa8eaa)
/*
 * defines common to all virtual CPUs
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "exec/tswap.h"
#include "hw/core/cpu.h"

/* some important defines:
 *
 * HOST_BIG_ENDIAN : 1 if the host cpu is big endian, 0 if it is
 * little endian.
 *
 * TARGET_BIG_ENDIAN : same for the target cpu
 */

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
#define BSWAP_NEEDED
#endif

/* Target-endianness CPU memory access functions. These fit into the
 * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
 */
#if TARGET_BIG_ENDIAN
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
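
/*
 * For example, stl_p(buf, val) stores a 32-bit value into a host buffer
 * in guest byte order, and ldl_p(buf) loads it back, regardless of the
 * host endianness.
 */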

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include "exec/user/abitypes.h"

/*
 * If non-zero, the guest virtual address space is a contiguous subset
 * of the host virtual address space, i.e. '-R reserved_va' is in effect
 * either from the command-line or by default.  The value is the last
 * byte of the guest address space, e.g. UINT32_MAX.
 *
 * If zero, the host and guest virtual address spaces are intermingled.
 */
extern unsigned long reserved_va;

/*
 * Limit the guest addresses as best we can.
 *
 * When not using -R reserved_va, we cannot really limit the guest
 * to less address space than the host.  For 32-bit guests, this
 * acts as a sanity check that we're not giving the guest an address
 * that it cannot even represent.  For 64-bit guests... the address
 * might not be what the real kernel would give, but it is at least
 * representable in the guest.
 *
 * TODO: Improve address allocation to avoid this problem, and to
 * avoid setting bits at the top of guest addresses that might need
 * to be used for tags.
 */
#define GUEST_ADDR_MAX_                                                 \
    ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ?  \
     UINT32_MAX : ~0ul)
#define GUEST_ADDR_MAX    (reserved_va ? : GUEST_ADDR_MAX_)
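
/*
 * For example, if reserved_va is non-zero (say 0xffffffff for a 32-bit
 * guest), GUEST_ADDR_MAX is that value; otherwise it falls back to
 * GUEST_ADDR_MAX_, i.e. UINT32_MAX for 32-bit guests and ~0ul for
 * 64-bit guests.
 */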

#else

#include "exec/hwaddr.h"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"
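
/*
 * Each inclusion of memory_ldst.h.inc declares a family of target-endian
 * accessors, e.g. address_space_ldl(as, addr, attrs, result) for the
 * plain AddressSpace case and address_space_ldl_cached_slow() for the
 * MemoryRegionCache case.
 */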

static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

/* Inline fast path for direct RAM access.  */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"
#endif

/* page related stuff */

#ifdef TARGET_PAGE_BITS_VARY
# include "exec/page-vary.h"
extern const TargetPageBits target_page;
#ifdef CONFIG_DEBUG_TCG
#define TARGET_PAGE_BITS   ({ assert(target_page.decided); target_page.bits; })
#define TARGET_PAGE_MASK   ({ assert(target_page.decided); \
                              (target_long)target_page.mask; })
#else
#define TARGET_PAGE_BITS   target_page.bits
#define TARGET_PAGE_MASK   ((target_long)target_page.mask)
#endif
#define TARGET_PAGE_SIZE   (-(int)TARGET_PAGE_MASK)
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#define TARGET_PAGE_SIZE   (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK   ((target_long)-1 << TARGET_PAGE_BITS)
#endif

#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
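
/*
 * For example, with 4 KiB pages TARGET_PAGE_BITS is 12, TARGET_PAGE_SIZE
 * is 0x1000, TARGET_PAGE_MASK clears the low 12 bits of an address, and
 * TARGET_PAGE_ALIGN(0x1234) rounds up to 0x2000.
 */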

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away.  */
#define PAGE_RESERVED  0x0100
#endif
/*
 * For linux-user, indicates that the page is mapped with the same semantics
 * in both guest and host.
 */
#define PAGE_PASSTHROUGH 0x0800

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, target_ulong,
                                      target_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong last, int flags);
void page_reset_target_data(target_ulong start, target_ulong last);

/**
 * page_check_range
 * @start: first byte of range
 * @last: last byte of range
 * @flags: flags required for each page
 *
 * Return true if every page in [@start, @last] has @flags set.
 * Return false if any page is unmapped.  Thus testing flags == 0 is
 * equivalent to testing for flags == PAGE_VALID.
 */
bool page_check_range(target_ulong start, target_ulong last, int flags);
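
/*
 * For example, a caller that wants to touch size bytes of guest memory
 * directly might first check
 *     page_check_range(addr, addr + size - 1, PAGE_READ | PAGE_WRITE);
 */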

/**
 * page_check_range_empty:
 * @start: first byte of range
 * @last: last byte of range
 * Context: holding mmap lock
 *
 * Return true if the entire range [@start, @last] is unmapped.
 * The memory lock must be held so that the caller can ensure
 * the result stays true until a new mapping can be installed.
 */
bool page_check_range_empty(target_ulong start, target_ulong last);

/**
 * page_find_range_empty
 * @min: first byte of search range
 * @max: last byte of search range
 * @len: size of the hole required
 * @align: alignment of the hole required (power of 2)
 *
 * If there is a range [x, x+@len) within [@min, @max] such that
 * x % @align == 0, then return x.  Otherwise return -1.
 * The memory lock must be held, as the caller will want to ensure
 * the returned range stays empty until a new mapping can be installed.
 */
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
                                   target_ulong len, target_ulong align);
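
/*
 * This is typically used by the linux-user mmap code to pick a start
 * address when the guest does not supply one, e.g.
 *     page_find_range_empty(start, GUEST_ADDR_MAX, size, align);
 */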

/**
 * page_get_target_data(address)
 * @address: guest virtual address
 *
 * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
 * with the guest page at @address, allocating it if necessary.  The
 * caller should already have verified that the address is valid.
 *
 * The memory will be freed when the guest page is deallocated,
 * e.g. with the munmap system call.
 */
void *page_get_target_data(target_ulong address)
    __attribute__((returns_nonnull));
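
/*
 * A target defines TARGET_PAGE_DATA_SIZE when it needs such per-page
 * storage; target/arm, for example, uses it to hold MTE allocation tags
 * for user-only emulation.
 */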
#endif

CPUArchState *cpu_copy(CPUArchState *env);

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping.  E.g. the a20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Reset signal.  */
#define CPU_INTERRUPT_RESET       0x0400

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0800
#define CPU_INTERRUPT_TGT_INT_2   0x2000

/* First unused bit: 0x4000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
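
/*
 * For example, a device or interrupt-controller model typically raises
 * one of these with cpu_interrupt(cs, CPU_INTERRUPT_HARD) and lowers it
 * again with cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); the flags
 * accumulate in cs->interrupt_request until the target's
 * cpu_exec_interrupt hook handles them.
 */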

#ifdef CONFIG_USER_ONLY

/*
 * Allow some level of source compatibility with softmmu.  We do not
 * support any of the more exotic features, so only invalid pages may
 * be signaled by probe_access_flags().
 */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 2))
#define TLB_WATCHPOINT      0

static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return MMU_USER_IDX;
}
#else

/*
 * Flags stored in the low bits of the TLB virtual address.
 * These are defined so that fast path ram access is all zeros.
 * The flags must all lie between TARGET_PAGE_BITS and the
 * maximum address alignment bit.
 *
 * Use TARGET_PAGE_BITS_MIN so that these bits are constant
 * when TARGET_PAGE_BITS_VARY is in effect.
 *
 * The count, if not the placement, of these bits is known
 * to tcg/tcg-op-ldst.c, check_max_alignment().
 */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if TLB entry writes are ignored.  */
#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
#define TLB_FORCE_SLOW      (1 << (TARGET_PAGE_BITS_MIN - 5))

/*
 * Use this mask to check interception with an alignment mask
 * in a TCG backend.
 */
#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
    | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
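
/*
 * For a target with TARGET_PAGE_BITS_MIN == 12, for example, these flags
 * occupy bits 11 down to 7 of the TLB address: TLB_INVALID_MASK is 0x800,
 * TLB_NOTDIRTY 0x400, TLB_MMIO 0x200, TLB_DISCARD_WRITE 0x100 and
 * TLB_FORCE_SLOW 0x80, all within the in-page offset bits.
 */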

/*
 * Flags stored in CPUTLBEntryFull.slow_flags[x].
 * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
 */
/* Set if TLB entry requires byte swap.  */
#define TLB_BSWAP            (1 << 0)
/* Set if TLB entry contains a watchpoint.  */
#define TLB_WATCHPOINT       (1 << 1)
/* Set if TLB entry requires aligned accesses.  */
#define TLB_CHECK_ALIGNED    (1 << 2)

#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)

/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);

/**
 * tlb_hit_page: return true if page aligned @addr is a hit against the
 * TLB entry @tlb_addr
 *
 * @addr: virtual address to test (must be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

/**
 * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
 *
 * @addr: virtual address to test (need not be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}
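
/*
 * Because the flag bits live below TARGET_PAGE_MASK and the compared
 * address has them clear, a set TLB_INVALID_MASK bit makes the
 * comparison fail, so the fast path needs no separate validity test.
 */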

#endif /* !CONFIG_USER_ONLY */

/* Validate correct placement of CPUArchState. */
#include "cpu.h"
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));
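
/*
 * Helpers such as env_archcpu() and env_cpu() rely on this layout to
 * convert a CPUArchState pointer back to its containing ArchCPU or
 * CPUState with simple pointer arithmetic.
 */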

#endif /* CPU_ALL_H */