/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#if defined(CONFIG_USER_ONLY)
#include "exec/abi_ptr.h"
#include "exec/cpu_ldst.h"
#endif
#include "exec/mmu-access-type.h"
#include "exec/translation-block.h"
#include "qemu/clang-tsa.h"

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)qatomic_read(&cpu->neg.icount_decr.u32) < 0;
}
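
/*
 * Illustrative sketch (not part of this header): an emulated interruptible
 * copy-like instruction might poll this between units of work, assuming a
 * hypothetical helper do_one_unit() and that cpu_loop_exit_restore() is
 * available from the usual exec headers:
 *
 *     while (units_left) {
 *         do_one_unit(env);
 *         units_left--;
 *         write_back_progress(env, units_left);   // hypothetical
 *         if (cpu_loop_exit_requested(cs)) {
 *             cpu_loop_exit_restore(cs, GETPC());
 *         }
 *     }
 */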

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_destroy - destroy a CPU's TLB
 * @cpu: CPU whose TLB should be destroyed
 */
void tlb_destroy(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes are complete once the
 * source vCPU's safe work has run. This in turn depends on when the
 * guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
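
/*
 * Illustrative sketch (not from this header): a target helper for a guest
 * "invalidate TLB entry" instruction might broadcast the flush, assuming a
 * hypothetical helper_tlbi_page():
 *
 *     void helper_tlbi_page(CPUArchState *env, target_ulong va)
 *     {
 *         CPUState *cs = env_cpu(env);
 *         tlb_flush_page_all_cpus_synced(cs, va & TARGET_PAGE_MASK);
 *     }
 */
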
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB for all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes are complete once the
 * source vCPU's safe work has run. This in turn depends on when the
 * guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
                              uint16_t idxmap);
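
/*
 * Illustrative sketch (not from this header): flushing one page for a
 * subset of MMU indexes, assuming hypothetical target index names
 * MMU_KERNEL_IDX and MMU_USER_IDX:
 *
 *     uint16_t idxmap = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);
 *     tlb_flush_page_by_mmuidx(cs, va & TARGET_PAGE_MASK, idxmap);
 */
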
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * originating vCPU's work is scheduled as safe work, meaning all
 * flushes are complete once that safe work has run. This in turn
 * depends on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified MMU
 * indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * originating vCPU's work is scheduled as safe work, meaning all
 * flushes are complete once that safe work has run. This in turn
 * depends on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but only the low @bits of the
 * virtual address are significant when matching entries.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits);

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                            uint16_t idxmap, unsigned bits);
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
    (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);

/**
 * tlb_flush_range_by_mmuidx
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits);
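
/*
 * Illustrative sketch (not from this header): a range-based TLB invalidate,
 * e.g. for a guest instruction that names a start page and a page count,
 * assuming hypothetical locals start, num_pages and mmu_idx:
 *
 *     vaddr len = (vaddr)num_pages << TARGET_PAGE_BITS;
 *     tlb_flush_range_by_mmuidx(cs, start & TARGET_PAGE_MASK, len,
 *                               1 << mmu_idx, TARGET_LONG_BITS);
 */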

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                        vaddr len, uint16_t idxmap,
                                        unsigned bits);
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits);

/**
 * tlb_set_page_full:
 * @cpu: CPU context
 * @mmu_idx: mmu index of the tlb to modify
 * @addr: virtual address of the entry to add
 * @full: the details of the tlb entry
 *
 * Add an entry to @cpu tlb index @mmu_idx.  All of the fields of
 * @full must be filled, except for xlat_section, and constitute
 * the complete description of the translated page.
 *
 * This is generally called by the target tlb_fill function after
 * having performed a successful page table walk to find the physical
 * address and attributes for the translation.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
                       CPUTLBEntryFull *full);
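
/*
 * Illustrative sketch (not from this header): how a target's tlb_fill hook
 * might install a translation after a successful page table walk, assuming
 * hypothetical locals paddr, prot and attrs produced by that walk:
 *
 *     CPUTLBEntryFull full = {
 *         .phys_addr = paddr,
 *         .attrs = attrs,
 *         .prot = prot,
 *         .lg_page_size = TARGET_PAGE_BITS,
 *     };
 *     tlb_set_page_full(cs, mmu_idx, addr & TARGET_PAGE_MASK, &full);
 */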

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @addr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @addr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, vaddr size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, vaddr size);
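
/*
 * Illustrative sketch (not from this header): the simpler entry point, for
 * a target without memory transaction attributes, mapping one page
 * read/write at a hypothetical physical address paddr:
 *
 *     tlb_set_page(cs, addr & TARGET_PAGE_MASK, paddr,
 *                  PAGE_READ | PAGE_WRITE, mmu_idx, TARGET_PAGE_SIZE);
 */
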
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_destroy(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            vaddr addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     vaddr addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            vaddr addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 vaddr addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
                                                          vaddr addr,
                                                          uint16_t idxmap,
                                                          unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                                             vaddr len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                      vaddr addr,
                                                      vaddr len,
                                                      uint16_t idxmap,
                                                      unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
                                                             vaddr len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif
/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr.  Raise an exception if the
 * page does not satisfy @access_type.  Raise an exception if the
 * access (@addr, @size) hits a watchpoint.  For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
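
/*
 * Illustrative sketch (not from this header): a helper that must fault up
 * front, before modifying any guest state, can probe the whole destination
 * first; GETPC() supplies the return address for unwinding.  Assumes
 * hypothetical locals dest_addr, total_len and mmu_idx:
 *
 *     void *host = probe_write(env, dest_addr, total_len, mmu_idx, GETPC());
 *     if (host) {
 *         memset(host, 0, total_len);   // fast path: RAM-backed page
 *     }
 */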

/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK bits for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
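
/*
 * Illustrative sketch (not from this header): a first-fault / no-fault
 * style access can probe without raising an exception and inspect the
 * returned flags, assuming hypothetical locals addr, mmu_idx and ra:
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, 1, MMU_DATA_LOAD,
 *                                    mmu_idx, true, &host, ra);
 *     if (flags & TLB_INVALID_MASK) {
 *         // no valid translation: take the architectural no-fault path
 *     }
 */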

#ifndef CONFIG_USER_ONLY
/**
 * probe_access_full:
 * Like probe_access_flags, except also return into @pfull.
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 */
int probe_access_full(CPUArchState *env, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost,
                      CPUTLBEntryFull **pfull, uintptr_t retaddr);

/**
 * probe_access_full_mmu() - Like probe_access_full except cannot fault and
 * doesn't trigger instrumentation.
 *
 * @env: CPUArchState
 * @addr: virtual address to probe
 * @size: size of the probe
 * @access_type: read, write or execute permission
 * @mmu_idx: softmmu index
 * @phost: ptr to return value host address or NULL
 * @pfull: ptr to return value CPUTLBEntryFull structure or NULL
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 *
 * Returns: TLB flags as per probe_access_flags()
 */
int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
                          MMUAccessType access_type, int mmu_idx,
                          void **phost, CPUTLBEntryFull **pfull);
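
/*
 * Illustrative sketch (not from this header): using the non-faulting,
 * MMU-only variant, e.g. from a page table walker that needs the physical
 * address behind a guest page, assuming hypothetical locals addr and
 * mmu_idx:
 *
 *     CPUTLBEntryFull *full;
 *     void *host;
 *     int flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD,
 *                                       mmu_idx, &host, &full);
 *     if (!(flags & TLB_INVALID_MASK)) {
 *         hwaddr pa = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
 *         // use pa; full is transient, so copy what is needed now
 *     }
 */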

#endif

static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    return tb->itree.start;
#else
    return tb->page_addr[0];
#endif
}

static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
    return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
#else
    return tb->page_addr[1];
#endif
}

static inline void tb_set_page_addr0(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    tb->itree.start = addr;
    /*
     * To begin, we record an interval of one byte.  When the translation
     * loop encounters a second page, the interval will be extended to
     * include the first byte of the second page, which is sufficient to
     * allow tb_page_addr1() above to work properly.  The final corrected
     * interval will be set by tb_page_add() from tb->size before the
     * node is added to the interval tree.
     */
    tb->itree.last = addr;
#else
    tb->page_addr[0] = addr;
#endif
}

static inline void tb_set_page_addr1(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    /* Extend the interval to the first byte of the second page.  See above. */
    tb->itree.last = addr;
#else
    tb->page_addr[1] = addr;
#endif
}

/* TranslationBlock invalidate API */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif
526 
527 /* The true return address will often point to a host insn that is part of
528    the next translated guest insn.  Adjust the address backward to point to
529    the middle of the call insn.  Subtracting one would do the job except for
530    several compressed mode architectures (arm, mips) which set the low bit
531    to indicate the compressed mode; subtracting two works around that.  It
532    is also the case that there are no host isas that contain a call insn
533    smaller than 4 bytes, so we don't worry about special-casing this.  */
534 #define GETPC_ADJ   2
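
/*
 * Illustrative sketch (not from this header): helpers called from generated
 * code pass GETPC() down so a fault inside the helper can be unwound back to
 * the guest instruction that caused it.  helper_copy8() is hypothetical; the
 * cpu_*_data_ra accessors come from exec/cpu_ldst.h:
 *
 *     void helper_copy8(CPUArchState *env, target_ulong dst, target_ulong src)
 *     {
 *         uintptr_t ra = GETPC();
 *         uint64_t val = cpu_ldq_data_ra(env, src, ra);
 *         cpu_stq_data_ra(env, dst, val, ra);
 *     }
 */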

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

/**
 * get_page_addr_code_hostp()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output for the host address, or NULL
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp);

/**
 * get_page_addr_code()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                vaddr addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}

#if defined(CONFIG_USER_ONLY)
void TSA_NO_TSA mmap_lock(void);
void TSA_NO_TSA mmap_unlock(void);
bool have_mmap_lock(void);

static inline void mmap_unlock_guard(void *unused)
{
    mmap_unlock();
}

#define WITH_MMAP_LOCK_GUARD()                                            \
    for (int _mmap_lock_iter __attribute__((cleanup(mmap_unlock_guard)))  \
         = (mmap_lock(), 0); _mmap_lock_iter == 0; _mmap_lock_iter = 1)
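
/*
 * Illustrative sketch (not from this header): the guard macro takes the
 * mmap lock for the statement or block that follows it and releases it via
 * the cleanup attribute on every exit path:
 *
 *     WITH_MMAP_LOCK_GUARD() {
 *         update_guest_mappings();   // hypothetical work needing the lock
 *     }
 */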

/**
 * adjust_signal_pc:
 * @pc: raw pc from the host signal ucontext_t.
 * @is_write: host memory operation was write, or read-modify-write.
 *
 * Alter @pc as required for unwinding.  Return the type of the
 * guest memory access -- host reads may be for guest execution.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write);

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr);

/**
 * cpu_loop_exit_sigsegv:
 * @cpu: the cpu context
 * @addr: the guest address of the fault
 * @access_type: access was read/write/execute
 * @maperr: true for invalid page, false for permission fault
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGSEGV, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                                      MMUAccessType access_type,
                                      bool maperr, uintptr_t ra);

/**
 * cpu_loop_exit_sigbus:
 * @cpu: the cpu context
 * @addr: the guest address of the alignment fault
 * @access_type: access was read/write/execute
 * @ra: host pc for unwinding
 *
 * Use the TCGCPUOps hook to record cpu state, do guest operating system
 * specific things to raise SIGBUS, and jump to the main cpu loop.
 */
G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                                     MMUAccessType access_type,
                                     uintptr_t ra);
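
/*
 * Illustrative sketch (not from this header): a user-only host SIGSEGV
 * handler typically combines the hooks above, assuming hypothetical locals
 * pc, guest_addr, is_write, maperr and old_set taken from the host
 * siginfo/ucontext:
 *
 *     MMUAccessType t = adjust_signal_pc(&pc, is_write);
 *     if (t == MMU_DATA_STORE && !maperr &&
 *         handle_sigsegv_accerr_write(cpu, &old_set, pc, guest_addr)) {
 *         return;   // page was made writable again; retry the access
 *     }
 *     cpu_loop_exit_sigsegv(cpu, guest_addr, t, maperr, pc);
 */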

#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}
#define WITH_MMAP_LOCK_GUARD()

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

#endif