xref: /qemu/include/exec/exec-all.h (revision b2a3cbb8)
1 /*
2  * internal execution defines for qemu
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #ifndef EXEC_ALL_H
21 #define EXEC_ALL_H
22 
23 #include "cpu.h"
24 #ifdef CONFIG_TCG
25 #include "exec/cpu_ldst.h"
26 #endif
27 
28 /* Allow inspection of translation results - the slowdown should be negligible, so we leave it enabled. */
29 #define DEBUG_DISAS
30 
31 /* Page tracking code uses ram addresses in system mode, and virtual
32    addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
33    type.  */
34 #if defined(CONFIG_USER_ONLY)
35 typedef abi_ulong tb_page_addr_t;
36 #define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
37 #else
38 typedef ram_addr_t tb_page_addr_t;
39 #define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
40 #endif
41 
42 /**
43  * cpu_unwind_state_data:
44  * @cpu: the cpu context
45  * @host_pc: the host pc within the translation
46  * @data: output data
47  *
48  * Attempt to load the unwind state for a host pc occurring in
49  * translated code.  If @host_pc is not in translated code, the
50  * function returns false; otherwise @data is loaded.
51  * This is the same unwind info as given to restore_state_to_opc.
52  */
53 bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);
54 
55 /**
56  * cpu_restore_state:
57  * @cpu: the cpu context
58  * @host_pc: the host pc within the translation
59  * @return: true if state was restored, false otherwise
60  *
61  * Attempt to restore the state for a fault occurring in translated
62  * code. If @host_pc is not in translated code no state is
63  * restored and the function returns false.
64  */
65 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);
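/*
 * Example (a minimal sketch, not taken from any particular target): a
 * fault path entered from a TCG helper can synchronize guest state from
 * the host return address before jumping back to the main loop.
 * "raise_guest_fault" is a hypothetical target-specific function.
 *
 *     static void do_fault(CPUState *cs, uintptr_t retaddr)
 *     {
 *         // Recover guest PC/flags from the host pc, if it lies in
 *         // translated code; otherwise the state is already current.
 *         cpu_restore_state(cs, retaddr);
 *         raise_guest_fault(cs);        // hypothetical; sets exception_index
 *         cpu_loop_exit(cs);            // never returns
 *     }
 */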
66 
67 G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
68 G_NORETURN void cpu_loop_exit(CPUState *cpu);
69 G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
70 G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
71 
72 /**
73  * cpu_loop_exit_requested:
74  * @cpu: The CPU state to be tested
75  *
76  * Indicate if somebody asked for a return of the CPU to the main loop
77  * (e.g., via cpu_exit() or cpu_interrupt()).
78  *
79  * This is helpful for architectures that support interruptible
80  * instructions. After writing back all state to registers/memory, this
81  * call can be used to check if it makes sense to return to the main loop
82  * or to continue executing the interruptible instruction.
83  */
84 static inline bool cpu_loop_exit_requested(CPUState *cpu)
85 {
86     return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
87 }
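/*
 * Example (a minimal sketch): a helper implementing an interruptible,
 * multi-element guest operation can write back its partial progress and
 * bail out to the main loop when an exit has been requested.  The
 * per-operation state and work function below are hypothetical.
 *
 *     void helper_long_op(CPUArchState *env)
 *     {
 *         CPUState *cs = env_cpu(env);
 *
 *         while (env->op_count) {          // hypothetical per-op state
 *             do_one_element(env);         // hypothetical unit of work
 *             env->op_count--;
 *             if (cpu_loop_exit_requested(cs)) {
 *                 // Partial progress is already in env; resume later.
 *                 cpu_loop_exit_restore(cs, GETPC());
 *             }
 *         }
 *     }
 */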
88 
89 #if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
90 /* cputlb.c */
91 /**
92  * tlb_init - initialize a CPU's TLB
93  * @cpu: CPU whose TLB should be initialized
94  */
95 void tlb_init(CPUState *cpu);
96 /**
97  * tlb_destroy - destroy a CPU's TLB
98  * @cpu: CPU whose TLB should be destroyed
99  */
100 void tlb_destroy(CPUState *cpu);
101 /**
102  * tlb_flush_page:
103  * @cpu: CPU whose TLB should be flushed
104  * @addr: virtual address of page to be flushed
105  *
106  * Flush one page from the TLB of the specified CPU, for all
107  * MMU indexes.
108  */
109 void tlb_flush_page(CPUState *cpu, target_ulong addr);
110 /**
111  * tlb_flush_page_all_cpus:
112  * @src: source CPU of the flush
113  * @addr: virtual address of page to be flushed
114  *
115  * Flush one page from the TLB of all CPUs, for all
116  * MMU indexes.
117  */
118 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
119 /**
120  * tlb_flush_page_all_cpus_synced:
121  * @src: source CPU of the flush
122  * @addr: virtual address of page to be flushed
123  *
124  * Flush one page from the TLB of all CPUs, for all MMU indexes,
125  * like tlb_flush_page_all_cpus, except that the source vCPU's work
126  * is scheduled as safe work, meaning all flushes will be complete once
127  * the source vCPU's safe work is complete. This will depend on when
128  * the guest's translation ends the TB.
129  */
130 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
131 /**
132  * tlb_flush:
133  * @cpu: CPU whose TLB should be flushed
134  *
135  * Flush the entire TLB for the specified CPU. Most CPU architectures
136  * allow the implementation to drop entries from the TLB at any time
137  * so this is generally safe. If more selective flushing is required
138  * use one of the other functions for efficiency.
139  */
140 void tlb_flush(CPUState *cpu);
141 /**
142  * tlb_flush_all_cpus:
143  * @src_cpu: source CPU of the flush
144  */
145 void tlb_flush_all_cpus(CPUState *src_cpu);
146 /**
147  * tlb_flush_all_cpus_synced:
148  * @src_cpu: source CPU of the flush
149  *
150  * Like tlb_flush_all_cpus, except that the source vCPU's work is
151  * scheduled as safe work, meaning all flushes will be complete once
152  * the source vCPU's safe work is complete. This will depend on when
153  * the guest's translation ends the TB.
154  */
155 void tlb_flush_all_cpus_synced(CPUState *src_cpu);
156 /**
157  * tlb_flush_page_by_mmuidx:
158  * @cpu: CPU whose TLB should be flushed
159  * @addr: virtual address of page to be flushed
160  * @idxmap: bitmap of MMU indexes to flush
161  *
162  * Flush one page from the TLB of the specified CPU, for the specified
163  * MMU indexes.
164  */
165 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
166                               uint16_t idxmap);
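/*
 * Example (illustrative only): @idxmap is a bitmap keyed by MMU index,
 * so a target that distinguishes, say, kernel and user lookups can drop
 * one page from both with a single call.  The index names below are
 * hypothetical.
 *
 *     tlb_flush_page_by_mmuidx(cs, vaddr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */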
167 /**
168  * tlb_flush_page_by_mmuidx_all_cpus:
169  * @cpu: Originating CPU of the flush
170  * @addr: virtual address of page to be flushed
171  * @idxmap: bitmap of MMU indexes to flush
172  *
173  * Flush one page from the TLB of all CPUs, for the specified
174  * MMU indexes.
175  */
176 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
177                                        uint16_t idxmap);
178 /**
179  * tlb_flush_page_by_mmuidx_all_cpus_synced:
180  * @cpu: Originating CPU of the flush
181  * @addr: virtual address of page to be flushed
182  * @idxmap: bitmap of MMU indexes to flush
183  *
184  * Flush one page from the TLB of all CPUs, for the specified MMU
185  * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the source
186  * vCPU's work is scheduled as safe work, meaning all flushes will be
187  * complete once the source vCPU's safe work is complete. This will
188  * depend on when the guest's translation ends the TB.
189  */
190 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
191                                               uint16_t idxmap);
192 /**
193  * tlb_flush_by_mmuidx:
194  * @cpu: CPU whose TLB should be flushed
196  * @idxmap: bitmap of MMU indexes to flush
197  *
198  * Flush all entries from the TLB of the specified CPU, for the specified
199  * MMU indexes.
200  */
201 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
202 /**
203  * tlb_flush_by_mmuidx_all_cpus:
204  * @cpu: Originating CPU of the flush
205  * @idxmap: bitmap of MMU indexes to flush
206  *
207  * Flush all entries from all TLBs of all CPUs, for the specified
208  * MMU indexes.
209  */
210 void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
211 /**
212  * tlb_flush_by_mmuidx_all_cpus_synced:
213  * @cpu: Originating CPU of the flush
214  * @idxmap: bitmap of MMU indexes to flush
215  *
216  * Flush all entries from all TLBs of all CPUs, for the specified
217  * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the source
218  * vCPU's work is scheduled as safe work, meaning all flushes will be
219  * complete once the source vCPU's safe work is complete. This will
220  * depend on when the guest's translation ends the TB.
221  */
222 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
223 
224 /**
225  * tlb_flush_page_bits_by_mmuidx
226  * @cpu: CPU whose TLB should be flushed
227  * @addr: virtual address of page to be flushed
228  * @idxmap: bitmap of mmu indexes to flush
229  * @bits: number of significant bits in address
230  *
231  * Similar to tlb_flush_page_by_mmuidx, but comparing only the low @bits of each virtual address.
232  */
233 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
234                                    uint16_t idxmap, unsigned bits);
235 
236 /* Similarly, with broadcast and syncing. */
237 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
238                                             uint16_t idxmap, unsigned bits);
239 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
240     (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);
241 
242 /**
243  * tlb_flush_range_by_mmuidx
244  * @cpu: CPU whose TLB should be flushed
245  * @addr: virtual address of the start of the range to be flushed
246  * @len: length of range to be flushed
247  * @idxmap: bitmap of mmu indexes to flush
248  * @bits: number of significant bits in address
249  *
250  * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
251  * comparing only the low @bits worth of each virtual page.
252  */
253 void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
254                                target_ulong len, uint16_t idxmap,
255                                unsigned bits);
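/*
 * Example (illustrative): invalidating a 64KiB guest region for one MMU
 * index, matching full virtual addresses (all TARGET_LONG_BITS of them).
 * The MMU index name is hypothetical.
 *
 *     tlb_flush_range_by_mmuidx(cs, base & TARGET_PAGE_MASK, 0x10000,
 *                               1 << MMU_USER_IDX, TARGET_LONG_BITS);
 */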
256 
257 /* Similarly, with broadcast and syncing. */
258 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
259                                         target_ulong len, uint16_t idxmap,
260                                         unsigned bits);
261 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
262                                                target_ulong addr,
263                                                target_ulong len,
264                                                uint16_t idxmap,
265                                                unsigned bits);
266 
267 /**
268  * tlb_set_page_full:
269  * @cpu: CPU context
270  * @mmu_idx: mmu index of the tlb to modify
271  * @vaddr: virtual address of the entry to add
272  * @full: the details of the tlb entry
273  *
274  * Add an entry to @cpu tlb index @mmu_idx.  All of the fields of
275  * @full must be filled, except for xlat_section, and constitute
276  * the complete description of the translated page.
277  *
278  * This is generally called by the target tlb_fill function after
279  * having performed a successful page table walk to find the physical
280  * address and attributes for the translation.
281  *
282  * At most one entry for a given virtual address is permitted. Only a
283  * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
284  * used by tlb_flush_page.
285  */
286 void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
287                        CPUTLBEntryFull *full);
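/*
 * Example (a minimal sketch of a target tlb_fill-style path; the page
 * table walker and its result structure are hypothetical):
 *
 *     bool my_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
 *                          MMUAccessType access_type, int mmu_idx,
 *                          bool probe, uintptr_t retaddr)
 *     {
 *         MyWalkResult res;                            // hypothetical
 *
 *         if (my_page_table_walk(cs, addr, access_type, mmu_idx, &res)) {
 *             CPUTLBEntryFull full = {
 *                 .phys_addr    = res.paddr,
 *                 .attrs        = res.attrs,
 *                 .prot         = res.prot,
 *                 .lg_page_size = res.lg_page_size,
 *             };
 *             tlb_set_page_full(cs, mmu_idx, addr, &full);
 *             return true;
 *         }
 *         if (probe) {
 *             return false;
 *         }
 *         cpu_restore_state(cs, retaddr);
 *         raise_mmu_fault(cs, addr, access_type);      // hypothetical
 *         cpu_loop_exit(cs);
 *     }
 */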
288 
289 /**
290  * tlb_set_page_with_attrs:
291  * @cpu: CPU to add this TLB entry for
292  * @vaddr: virtual address of page to add entry for
293  * @paddr: physical address of the page
294  * @attrs: memory transaction attributes
295  * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
296  * @mmu_idx: MMU index to insert TLB entry for
297  * @size: size of the page in bytes
298  *
299  * Add an entry to this CPU's TLB (a mapping from virtual address
300  * @vaddr to physical address @paddr) with the specified memory
301  * transaction attributes. This is generally called by the target CPU
302  * specific code after it has been called through the tlb_fill()
303  * entry point and performed a successful page table walk to find
304  * the physical address and attributes for the virtual address
305  * which provoked the TLB miss.
306  *
307  * At most one entry for a given virtual address is permitted. Only a
308  * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
309  * used by tlb_flush_page.
310  */
311 void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
312                              hwaddr paddr, MemTxAttrs attrs,
313                              int prot, int mmu_idx, target_ulong size);
314 /* tlb_set_page:
315  *
316  * This function is equivalent to calling tlb_set_page_with_attrs()
317  * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
318  * as a convenience for CPUs which don't use memory transaction attributes.
319  */
320 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
321                   hwaddr paddr, int prot,
322                   int mmu_idx, target_ulong size);
323 #else
324 static inline void tlb_init(CPUState *cpu)
325 {
326 }
327 static inline void tlb_destroy(CPUState *cpu)
328 {
329 }
330 static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
331 {
332 }
333 static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
334 {
335 }
336 static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
337                                                   target_ulong addr)
338 {
339 }
340 static inline void tlb_flush(CPUState *cpu)
341 {
342 }
343 static inline void tlb_flush_all_cpus(CPUState *src_cpu)
344 {
345 }
346 static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
347 {
348 }
349 static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
350                                             target_ulong addr, uint16_t idxmap)
351 {
352 }
353 
354 static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
355 {
356 }
357 static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
358                                                      target_ulong addr,
359                                                      uint16_t idxmap)
360 {
361 }
362 static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
363                                                             target_ulong addr,
364                                                             uint16_t idxmap)
365 {
366 }
367 static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
368 {
369 }
370 
371 static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
372                                                        uint16_t idxmap)
373 {
374 }
375 static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
376                                                  target_ulong addr,
377                                                  uint16_t idxmap,
378                                                  unsigned bits)
379 {
380 }
381 static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
382                                                           target_ulong addr,
383                                                           uint16_t idxmap,
384                                                           unsigned bits)
385 {
386 }
387 static inline void
388 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
389                                               uint16_t idxmap, unsigned bits)
390 {
391 }
392 static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
393                                              target_ulong len, uint16_t idxmap,
394                                              unsigned bits)
395 {
396 }
397 static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
398                                                       target_ulong addr,
399                                                       target_ulong len,
400                                                       uint16_t idxmap,
401                                                       unsigned bits)
402 {
403 }
404 static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
405                                                              target_ulong addr,
406                                                              target_ulong len,
407                                                              uint16_t idxmap,
408                                                              unsigned bits)
409 {
410 }
411 #endif
412 /**
413  * probe_access:
414  * @env: CPUArchState
415  * @addr: guest virtual address to look up
416  * @size: size of the access
417  * @access_type: read, write or execute permission
418  * @mmu_idx: MMU index to use for lookup
419  * @retaddr: return address for unwinding
420  *
421  * Look up the guest virtual address @addr.  Raise an exception if the
422  * page does not satisfy @access_type.  Raise an exception if the
423  * access (@addr, @size) hits a watchpoint.  For writes, mark a clean
424  * page as dirty.
425  *
426  * Finally, return the host address for a page that is backed by RAM,
427  * or NULL if the page requires I/O.
428  */
429 void *probe_access(CPUArchState *env, target_ulong addr, int size,
430                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
431 
432 static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
433                                 int mmu_idx, uintptr_t retaddr)
434 {
435     return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
436 }
437 
438 static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
439                                int mmu_idx, uintptr_t retaddr)
440 {
441     return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
442 }
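/*
 * Example (a minimal sketch): a helper that must fault before modifying
 * any guest memory can probe the whole destination up front, then
 * perform the stores with the usual accessors.  The helper itself is
 * hypothetical.
 *
 *     void helper_fill16(CPUArchState *env, target_ulong dest, uint32_t val)
 *     {
 *         uintptr_t ra = GETPC();
 *         int mmu_idx = cpu_mmu_index(env, false);
 *
 *         probe_write(env, dest, 16, mmu_idx, ra);
 *         for (int i = 0; i < 4; i++) {
 *             cpu_stl_data_ra(env, dest + i * 4, val, ra);
 *         }
 *     }
 */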
443 
444 /**
445  * probe_access_flags:
446  * @env: CPUArchState
447  * @addr: guest virtual address to look up
448  * @access_type: read, write or execute permission
449  * @mmu_idx: MMU index to use for lookup
450  * @nonfault: suppress the fault
451  * @phost: return value for host address
452  * @retaddr: return address for unwinding
453  *
454  * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
455  * the page, and storing the host address for RAM in @phost.
456  *
457  * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
458  * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
459  * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
460  * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
461  */
462 int probe_access_flags(CPUArchState *env, target_ulong addr,
463                        MMUAccessType access_type, int mmu_idx,
464                        bool nonfault, void **phost, uintptr_t retaddr);
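/*
 * Example (a minimal sketch, inside a hypothetical load helper): a
 * "no-fault" load can probe without raising an exception and fall back
 * to a zero result when the page is unmapped.
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
 *                                    true, &host, retaddr);
 *     if (flags & TLB_INVALID_MASK) {
 *         val = 0;                           // not mapped: no fault taken
 *     } else if (flags & TLB_MMIO) {
 *         val = cpu_ldl_data_ra(env, addr, retaddr);    // slow path
 *     } else {
 *         val = ldl_p(host);                 // direct host access
 *     }
 */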
465 
466 #ifndef CONFIG_USER_ONLY
467 /**
468  * probe_access_full:
469  * Like probe_access_flags, except also return into @pfull.
470  *
471  * The CPUTLBEntryFull structure returned via @pfull is transient
472  * and must be consumed or copied immediately, before any further
473  * access or changes to TLB @mmu_idx.
474  */
475 int probe_access_full(CPUArchState *env, target_ulong addr,
476                       MMUAccessType access_type, int mmu_idx,
477                       bool nonfault, void **phost,
478                       CPUTLBEntryFull **pfull, uintptr_t retaddr);
479 #endif
480 
481 #define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */
482 
483 /* Estimated block size for TB allocation.  */
484 /* ??? The following is based on a 2015 survey of x86_64 host output.
485    Better would seem to be some sort of dynamically sized TB array,
486    adapting to the block sizes actually being produced.  */
487 #if defined(CONFIG_SOFTMMU)
488 #define CODE_GEN_AVG_BLOCK_SIZE 400
489 #else
490 #define CODE_GEN_AVG_BLOCK_SIZE 150
491 #endif
492 
493 /*
494  * Translation Cache-related fields of a TB.
495  * This struct exists just for convenience; we keep track of TBs in a binary
496  * search tree, and the only fields needed to compare TBs in the tree are
497  * @ptr and @size.
498  * Note: the address of search data can be obtained by adding @size to @ptr.
499  */
500 struct tb_tc {
501     const void *ptr;    /* pointer to the translated code */
502     size_t size;
503 };
504 
505 struct TranslationBlock {
506 #if !TARGET_TB_PCREL
507     /*
508      * Guest PC corresponding to this block.  This must be the true
509      * virtual address.  Therefore e.g. x86 stores EIP + CS_BASE, and
510      * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
511      * privilege, must store those bits elsewhere.
512      *
513      * If TARGET_TB_PCREL, the opcodes for the TranslationBlock are
514      * written such that the TB is associated only with the physical
515      * page and may be run in any virtual address context.  In this case,
516      * PC must always be taken from ENV in a target-specific manner.
517      * Unwind information is taken as offsets from the page, to be
518      * deposited into the "current" PC.
519      */
520     target_ulong pc;
521 #endif
522 
523     /*
524      * Target-specific data associated with the TranslationBlock, e.g.:
525      * x86: the original user, the Code Segment virtual base,
526      * arm: an extension of tb->flags,
527      * s390x: instruction data for EXECUTE,
528      * sparc: the next pc of the instruction queue (for delay slots).
529      */
530     target_ulong cs_base;
531 
532     uint32_t flags; /* flags defining in which context the code was generated */
533     uint32_t cflags;    /* compile flags */
534 
535 /* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
536 #define CF_COUNT_MASK    0x000001ff
537 #define CF_NO_GOTO_TB    0x00000200 /* Do not chain with goto_tb */
538 #define CF_NO_GOTO_PTR   0x00000400 /* Do not chain with goto_ptr */
539 #define CF_SINGLE_STEP   0x00000800 /* gdbstub single-step in effect */
540 #define CF_LAST_IO       0x00008000 /* Last insn may be an IO access.  */
541 #define CF_MEMI_ONLY     0x00010000 /* Only instrument memory ops */
542 #define CF_USE_ICOUNT    0x00020000
543 #define CF_INVALID       0x00040000 /* TB is stale. Set with @jmp_lock held */
544 #define CF_PARALLEL      0x00080000 /* Generate code for a parallel context */
545 #define CF_NOIRQ         0x00100000 /* Generate an uninterruptible TB */
546 #define CF_CLUSTER_MASK  0xff000000 /* Top 8 bits are cluster ID */
547 #define CF_CLUSTER_SHIFT 24
548 
549     /* Per-vCPU dynamic tracing state used to generate this TB */
550     uint32_t trace_vcpu_dstate;
551 
552     /*
553      * Above fields used for comparing
554      */
555 
556     /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
557     uint16_t size;
558     uint16_t icount;
559 
560     struct tb_tc tc;
561 
562     /* First and second physical page containing code. The lower bit
563        of the pointer tells the index in page_next[].
564        The list is protected by the locks of the TB's pages. */
565     uintptr_t page_next[2];
566     tb_page_addr_t page_addr[2];
567 
568     /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
569     QemuSpin jmp_lock;
570 
571     /* The following data are used to directly call another TB from
572      * the code of this one. This can be done either by emitting direct or
573      * indirect native jump instructions. These jumps are reset so that the TB
574      * just continues its execution. The TB can be linked to another one by
575      * setting one of the jump targets (or patching the jump instruction). Only
576      * two of such jumps are supported.
577      */
578     uint16_t jmp_reset_offset[2]; /* offset of original jump target */
579 #define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
580     uintptr_t jmp_target_arg[2];  /* target address or offset */
581 
582     /*
583      * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
584      * Each TB can have two outgoing jumps, and therefore can participate
585      * in two lists. The list entries are kept in jmp_list_next[2]. The least
586      * significant bit (LSB) of the pointers in these lists is used to encode
587      * which of the two list entries is to be used in the pointed TB.
588      *
589      * List traversals are protected by jmp_lock. The destination TB of each
590      * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
591      * can be acquired from any origin TB.
592      *
593      * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
594      * being invalidated, so that no further outgoing jumps from it can be set.
595      *
596      * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
597      * to a destination TB that has CF_INVALID set.
598      */
599     uintptr_t jmp_list_head;
600     uintptr_t jmp_list_next[2];
601     uintptr_t jmp_dest[2];
602 };
603 
604 /* Hide the read to avoid ifdefs for TARGET_TB_PCREL. */
605 static inline target_ulong tb_pc(const TranslationBlock *tb)
606 {
607 #if TARGET_TB_PCREL
608     qemu_build_not_reached();
609 #else
610     return tb->pc;
611 #endif
612 }
613 
614 /* Hide the qatomic_read to make code a little easier on the eyes */
615 static inline uint32_t tb_cflags(const TranslationBlock *tb)
616 {
617     return qatomic_read(&tb->cflags);
618 }
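/*
 * Example (illustrative): code generation commonly branches on cflags,
 * e.g. to emit serialized code for parallel contexts, or to recover the
 * cluster a TB was generated for.
 *
 *     uint32_t cflags = tb_cflags(tb);
 *
 *     if (cflags & CF_PARALLEL) {
 *         // emit atomic/serialized code paths
 *     }
 *     unsigned cluster = (cflags & CF_CLUSTER_MASK) >> CF_CLUSTER_SHIFT;
 */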
619 
620 static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
621 {
622     return tb->page_addr[0];
623 }
624 
625 static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
626 {
627     return tb->page_addr[1];
628 }
629 
630 static inline void tb_set_page_addr0(TranslationBlock *tb,
631                                      tb_page_addr_t addr)
632 {
633     tb->page_addr[0] = addr;
634 }
635 
636 static inline void tb_set_page_addr1(TranslationBlock *tb,
637                                      tb_page_addr_t addr)
638 {
639     tb->page_addr[1] = addr;
640 }
641 
642 /* current cflags for hashing/comparison */
643 uint32_t curr_cflags(CPUState *cpu);
644 
645 /* TranslationBlock invalidate API */
646 #if defined(CONFIG_USER_ONLY)
647 void tb_invalidate_phys_addr(target_ulong addr);
648 #else
649 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
650 #endif
651 void tb_flush(CPUState *cpu);
652 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
653 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end);
654 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
655 
656 /* GETPC is the true target of the return instruction that we'll execute.  */
657 #if defined(CONFIG_TCG_INTERPRETER)
658 extern __thread uintptr_t tci_tb_ptr;
659 # define GETPC() tci_tb_ptr
660 #else
661 # define GETPC() \
662     ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
663 #endif
664 
665 /* The true return address will often point to a host insn that is part of
666    the next translated guest insn.  Adjust the address backward to point to
667    the middle of the call insn.  Subtracting one would do the job except for
668    several compressed mode architectures (arm, mips) which set the low bit
669    to indicate the compressed mode; subtracting two works around that.  It
670    is also the case that there are no host ISAs that contain a call insn
671    smaller than 4 bytes, so we don't worry about special-casing this.  */
672 #define GETPC_ADJ   2
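/*
 * Example (a minimal sketch): helpers capture GETPC() once, at the
 * outermost level reached directly from generated code, and pass it down
 * so that any fault can unwind to the guest instruction that called the
 * helper.  The helper itself is hypothetical.
 *
 *     uint64_t helper_load_pair(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();
 *         uint32_t lo = cpu_ldl_data_ra(env, addr, ra);
 *         uint32_t hi = cpu_ldl_data_ra(env, addr + 4, ra);
 *         return ((uint64_t)hi << 32) | lo;
 *     }
 */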
673 
674 #if !defined(CONFIG_USER_ONLY)
675 
676 /**
677  * iotlb_to_section:
678  * @cpu: CPU performing the access
679  * @index: TCG CPU IOTLB entry
680  *
681  * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
682  * it refers to. @index will have been initially created and returned
683  * by memory_region_section_get_iotlb().
684  */
685 struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
686                                              hwaddr index, MemTxAttrs attrs);
687 #endif
688 
689 /**
690  * get_page_addr_code_hostp()
691  * @env: CPUArchState
692  * @addr: guest virtual address of guest code
693  *
694  * See get_page_addr_code() (full-system version) for documentation on the
695  * return value.
696  *
697  * Sets *@hostp (when @hostp is non-NULL) as follows.
698  * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
699  * to the host address where @addr's content is kept.
700  *
701  * Note: this function can trigger an exception.
702  */
703 tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
704                                         void **hostp);
705 
706 /**
707  * get_page_addr_code()
708  * @env: CPUArchState
709  * @addr: guest virtual address of guest code
710  *
711  * If we cannot translate and execute from the entire RAM page, or if
712  * the region is not backed by RAM, returns -1. Otherwise, returns the
713  * ram_addr_t corresponding to the guest code at @addr.
714  *
715  * Note: this function can trigger an exception.
716  */
717 static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
718                                                 target_ulong addr)
719 {
720     return get_page_addr_code_hostp(env, addr, NULL);
721 }
722 
723 #if defined(CONFIG_USER_ONLY)
724 void mmap_lock(void);
725 void mmap_unlock(void);
726 bool have_mmap_lock(void);
727 
728 /**
729  * adjust_signal_pc:
730  * @pc: raw pc from the host signal ucontext_t.
731  * @is_write: host memory operation was write, or read-modify-write.
732  *
733  * Alter @pc as required for unwinding.  Return the type of the
734  * guest memory access -- host reads may be for guest execution.
735  */
736 MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);
737 
738 /**
739  * handle_sigsegv_accerr_write:
740  * @cpu: the cpu context
741  * @old_set: the sigset_t from the signal ucontext_t
742  * @host_pc: the host pc, adjusted for the signal
743  * @guest_addr: the guest address of the fault
744  *
745  * Return true if the write fault has been handled, and should be re-tried.
746  */
747 bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
748                                  uintptr_t host_pc, abi_ptr guest_addr);
749 
750 /**
751  * cpu_loop_exit_sigsegv:
752  * @cpu: the cpu context
753  * @addr: the guest address of the fault
754  * @access_type: access was read/write/execute
755  * @maperr: true for invalid page, false for permission fault
756  * @ra: host pc for unwinding
757  *
758  * Use the TCGCPUOps hook to record cpu state, do guest operating system
759  * specific things to raise SIGSEGV, and jump to the main cpu loop.
760  */
761 G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
762                                       MMUAccessType access_type,
763                                       bool maperr, uintptr_t ra);
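/*
 * Example (illustrative, user-mode only): a target's memory-access check
 * can hand an unhandled access straight to the guest as SIGSEGV.  The
 * mapping check shown here is hypothetical.
 *
 *     if (!guest_page_is_mapped(addr)) {               // hypothetical check
 *         cpu_loop_exit_sigsegv(cs, addr, MMU_DATA_LOAD,
 *                               true, retaddr);        // maperr: no mapping
 *     }
 */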
764 
765 /**
766  * cpu_loop_exit_sigbus:
767  * @cpu: the cpu context
768  * @addr: the guest address of the alignment fault
769  * @access_type: access was read/write/execute
770  * @ra: host pc for unwinding
771  *
772  * Use the TCGCPUOps hook to record cpu state, do guest operating system
773  * specific things to raise SIGBUS, and jump to the main cpu loop.
774  */
775 G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
776                                      MMUAccessType access_type,
777                                      uintptr_t ra);
778 
779 #else
780 static inline void mmap_lock(void) {}
781 static inline void mmap_unlock(void) {}
782 
783 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
784 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
785 
786 MemoryRegionSection *
787 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
788                                   hwaddr *xlat, hwaddr *plen,
789                                   MemTxAttrs attrs, int *prot);
790 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
791                                        MemoryRegionSection *section);
792 #endif
793 
794 #endif
795