/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"
#include "sysemu/cpus.h"

/* Allow the translation results to be seen - the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);

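/*
 * Illustrative sketch (not part of this header): a hypothetical target
 * fault handler might unwind the guest state from the faulting host PC
 * before raising an exception. The helper and exception names here are
 * assumptions for the example.
 *
 *     static void mytarget_do_fault(CPUState *cs, uintptr_t retaddr)
 *     {
 *         cpu_restore_state(cs, retaddr, true);
 *         cs->exception_index = MYTARGET_EXCP_FAULT;
 *         cpu_loop_exit(cs);
 *     }
 */
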
void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as the name of the address space
 * @mr: the root memory region of the address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address spaces 0, 1, 2, etc. have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
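/*
 * Illustrative sketch (the MemoryRegion names and the two-address-space
 * layout are assumptions, not from this header): a target modelling a
 * secure and a non-secure bus might register its address spaces at
 * realize time like this.
 *
 *     cs->num_ases = 2;
 *     cpu_address_space_init(cs, 0, "cpu-memory", system_memory);
 *     cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_memory);
 */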
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes,
 * like tlb_flush_page_all_cpus, except that the source vCPU's work
 * is scheduled as safe work, meaning all flushes will be complete
 * once the source vCPU's safe work is complete. This will depend on
 * when the guest's current translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's current translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
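/*
 * Illustrative sketch: @idxmap has one bit per MMU index. The index
 * names below are hypothetical; a target would use its own mmu_idx
 * definitions.
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX));
 */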
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's current translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's current translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
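/*
 * Illustrative sketch (the page-table-walk helper is hypothetical): a
 * target's tlb_fill() would typically end in a call like this after a
 * successful walk, or unwind and raise a guest fault on failure.
 *
 *     if (mytarget_walk_page_table(cs, addr, access_type, mmu_idx,
 *                                  &paddr, &prot)) {
 *         tlb_set_page(cs, addr & TARGET_PAGE_MASK,
 *                      paddr & TARGET_PAGE_MASK,
 *                      prot, mmu_idx, TARGET_PAGE_SIZE);
 *     } else {
 *         cpu_loop_exit_restore(cs, retaddr);
 *     }
 */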
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};
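/*
 * Illustrative sketch: when unwinding a host PC back to its TB, the tree
 * lookup only needs a containment test over [ptr, ptr + size). The helper
 * name here is hypothetical.
 *
 *     static bool tc_contains(const struct tb_tc *tc, uintptr_t host_pc)
 *     {
 *         uintptr_t start = (uintptr_t)tc->ptr;
 *         return host_pc >= start && host_pc < start + tc->size;
 *     }
 */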

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
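/*
 * Illustrative sketch (a simplification of the lookup pattern in
 * cpu-exec.c, not a definitive recipe): find a TB in the physical hash
 * table, generating it if it does not exist yet.
 *
 *     uint32_t cf_mask = curr_cflags();
 *     tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
 *     if (tb == NULL) {
 *         tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
 *     }
 */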
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

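/*
 * Illustrative sketch: a helper called from generated code passes GETPC()
 * down so the guest state can be reconstructed at the faulting point.
 * helper_check and address_is_valid are hypothetical names.
 *
 *     void helper_check(CPUArchState *env, target_ulong addr)
 *     {
 *         if (!address_is_valid(env, addr)) {
 *             cpu_loop_exit_restore(ENV_GET_CPU(env), GETPC());
 *         }
 *     }
 */
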
/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
#endif

/* vl.c */
extern int singlestep;

#endif