xref: /qemu/accel/tcg/cpu-exec.c (revision 4a1babe5)
1 /*
2  *  emulator main execution loop
3  *
4  *  Copyright (c) 2003-2005 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qapi/error.h"
23 #include "qapi/type-helpers.h"
24 #include "hw/core/tcg-cpu-ops.h"
25 #include "trace.h"
26 #include "disas/disas.h"
27 #include "exec/exec-all.h"
28 #include "tcg/tcg.h"
29 #include "qemu/atomic.h"
30 #include "qemu/rcu.h"
31 #include "exec/log.h"
32 #include "qemu/main-loop.h"
33 #include "sysemu/cpus.h"
34 #include "exec/cpu-all.h"
35 #include "sysemu/cpu-timers.h"
36 #include "exec/replay-core.h"
37 #include "sysemu/tcg.h"
38 #include "exec/helper-proto-common.h"
39 #include "tb-jmp-cache.h"
40 #include "tb-hash.h"
41 #include "tb-context.h"
42 #include "internal-common.h"
43 #include "internal-target.h"
44 
45 /* -icount align implementation. */
46 
47 typedef struct SyncClocks {
48     int64_t diff_clk;
49     int64_t last_cpu_icount;
50     int64_t realtime_clock;
51 } SyncClocks;
52 
53 #if !defined(CONFIG_USER_ONLY)
54 /* Allow the guest to have at most a 3 ms advance.
55  * The difference between the two clocks can therefore
56  * oscillate around 0.
57  */
58 #define VM_CLOCK_ADVANCE 3000000
59 #define THRESHOLD_REDUCE 1.5
60 #define MAX_DELAY_PRINT_RATE 2000000000LL
61 #define MAX_NB_PRINTS 100
62 
63 int64_t max_delay;
64 int64_t max_advance;
65 
66 static void align_clocks(SyncClocks *sc, CPUState *cpu)
67 {
68     int64_t cpu_icount;
69 
70     if (!icount_align_option) {
71         return;
72     }
73 
74     cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
75     sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
76     sc->last_cpu_icount = cpu_icount;
77 
78     if (sc->diff_clk > VM_CLOCK_ADVANCE) {
79 #ifndef _WIN32
80         struct timespec sleep_delay, rem_delay;
81         sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
82         sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
83         if (nanosleep(&sleep_delay, &rem_delay) < 0) {
84             sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
85         } else {
86             sc->diff_clk = 0;
87         }
88 #else
89         Sleep(sc->diff_clk / SCALE_MS);
90         sc->diff_clk = 0;
91 #endif
92     }
93 }
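
/*
 * Worked example of the sleep above (numbers are illustrative only): with
 * diff_clk = 4500000 ns the guest clock is 4.5 ms ahead of the host, which
 * exceeds VM_CLOCK_ADVANCE (3 ms), so on POSIX hosts we request
 *     sleep_delay.tv_sec  = 4500000 / 1000000000 = 0
 *     sleep_delay.tv_nsec = 4500000 % 1000000000 = 4500000
 * and, if nanosleep() is interrupted by a signal, keep the unslept
 * remainder in diff_clk for the next call.
 */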
94 
95 static void print_delay(const SyncClocks *sc)
96 {
97     static float threshold_delay;
98     static int64_t last_realtime_clock;
99     static int nb_prints;
100 
101     if (icount_align_option &&
102         sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
103         nb_prints < MAX_NB_PRINTS) {
104         if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
105             (-sc->diff_clk / (float)1000000000LL <
106              (threshold_delay - THRESHOLD_REDUCE))) {
107             threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
108             qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
109                         threshold_delay - 1,
110                         threshold_delay);
111             nb_prints++;
112             last_realtime_clock = sc->realtime_clock;
113         }
114     }
115 }
116 
117 static void init_delay_params(SyncClocks *sc, CPUState *cpu)
118 {
119     if (!icount_align_option) {
120         return;
121     }
122     sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
123     sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
124     sc->last_cpu_icount
125         = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
126     if (sc->diff_clk < max_delay) {
127         max_delay = sc->diff_clk;
128     }
129     if (sc->diff_clk > max_advance) {
130         max_advance = sc->diff_clk;
131     }
132 
133     /* Print at most every 2s if the guest is late. We limit the number
134        of printed messages to MAX_NB_PRINTS (currently 100). */
135     print_delay(sc);
136 }
137 #else
138 static void align_clocks(SyncClocks *sc, const CPUState *cpu)
139 {
140 }
141 
142 static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
143 {
144 }
145 #endif /* !CONFIG_USER_ONLY */
146 
147 uint32_t curr_cflags(CPUState *cpu)
148 {
149     uint32_t cflags = cpu->tcg_cflags;
150 
151     /*
152      * Record gdb single-step.  We should be exiting the TB by raising
153      * EXCP_DEBUG, but to simplify other tests, disable chaining too.
154      *
155      * For singlestep and -d nochain, suppress goto_tb so that
156      * we can log -d cpu,exec after every TB.
157      */
158     if (unlikely(cpu->singlestep_enabled)) {
159         cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
160     } else if (qatomic_read(&one_insn_per_tb)) {
161         cflags |= CF_NO_GOTO_TB | 1;
162     } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
163         cflags |= CF_NO_GOTO_TB;
164     }
165 
166     return cflags;
167 }
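
/*
 * Illustrative note: the low CF_COUNT_MASK bits of cflags hold the maximum
 * number of instructions per TB, so the "| 1" above limits the next TB to a
 * single instruction.  For gdb single-step the result is
 *     CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1
 * i.e. one insn, no direct chaining and no goto_ptr lookup, which guarantees
 * a return to the main loop after every instruction.
 */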
168 
169 struct tb_desc {
170     vaddr pc;
171     uint64_t cs_base;
172     CPUArchState *env;
173     tb_page_addr_t page_addr0;
174     uint32_t flags;
175     uint32_t cflags;
176 };
177 
178 static bool tb_lookup_cmp(const void *p, const void *d)
179 {
180     const TranslationBlock *tb = p;
181     const struct tb_desc *desc = d;
182 
183     if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
184         tb_page_addr0(tb) == desc->page_addr0 &&
185         tb->cs_base == desc->cs_base &&
186         tb->flags == desc->flags &&
187         tb_cflags(tb) == desc->cflags) {
188         /* check next page if needed */
189         tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
190         if (tb_phys_page1 == -1) {
191             return true;
192         } else {
193             tb_page_addr_t phys_page1;
194             vaddr virt_page1;
195 
196             /*
197              * We know that the first page matched, and an otherwise valid TB
198              * encountered an incomplete instruction at the end of that page,
199              * therefore we know that generating a new TB from the current PC
200              * must also require reading from the next page -- even if the
201              * second page does not match, in which case the resulting insn
202              * would be different for the new TB.  Therefore any exception
203              * raised here by the faulting lookup is not premature.
204              */
205             virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
206             phys_page1 = get_page_addr_code(desc->env, virt_page1);
207             if (tb_phys_page1 == phys_page1) {
208                 return true;
209             }
210         }
211     }
212     return false;
213 }
214 
215 static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
216                                           uint64_t cs_base, uint32_t flags,
217                                           uint32_t cflags)
218 {
219     tb_page_addr_t phys_pc;
220     struct tb_desc desc;
221     uint32_t h;
222 
223     desc.env = cpu_env(cpu);
224     desc.cs_base = cs_base;
225     desc.flags = flags;
226     desc.cflags = cflags;
227     desc.pc = pc;
228     phys_pc = get_page_addr_code(desc.env, pc);
229     if (phys_pc == -1) {
230         return NULL;
231     }
232     desc.page_addr0 = phys_pc;
233     h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
234                      flags, cs_base, cflags);
235     return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
236 }
237 
238 /* Might cause an exception, so have a longjmp destination ready */
239 static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
240                                           uint64_t cs_base, uint32_t flags,
241                                           uint32_t cflags)
242 {
243     TranslationBlock *tb;
244     CPUJumpCache *jc;
245     uint32_t hash;
246 
247     /* we should never be trying to look up an INVALID tb */
248     tcg_debug_assert(!(cflags & CF_INVALID));
249 
250     hash = tb_jmp_cache_hash_func(pc);
251     jc = cpu->tb_jmp_cache;
252 
253     tb = qatomic_read(&jc->array[hash].tb);
254     if (likely(tb &&
255                jc->array[hash].pc == pc &&
256                tb->cs_base == cs_base &&
257                tb->flags == flags &&
258                tb_cflags(tb) == cflags)) {
259         goto hit;
260     }
261 
262     tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
263     if (tb == NULL) {
264         return NULL;
265     }
266 
267     jc->array[hash].pc = pc;
268     qatomic_set(&jc->array[hash].tb, tb);
269 
270 hit:
271     /*
272      * As long as tb is not NULL, the contents are consistent.  Therefore,
273      * the virtual PC has to match for non-CF_PCREL translations.
274      */
275     assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
276     return tb;
277 }
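
/*
 * Sketch of the typical caller pattern (it mirrors cpu_exec_step_atomic and
 * cpu_exec_loop below): a failed lookup is followed by translation under the
 * mmap lock, after which execution proceeds with the new TB.
 *
 *     tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
 *     if (tb == NULL) {
 *         mmap_lock();
 *         tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
 *         mmap_unlock();
 *     }
 */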
278 
279 static void log_cpu_exec(vaddr pc, CPUState *cpu,
280                          const TranslationBlock *tb)
281 {
282     if (qemu_log_in_addr_range(pc)) {
283         qemu_log_mask(CPU_LOG_EXEC,
284                       "Trace %d: %p [%08" PRIx64
285                       "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
286                       cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
287                       tb->flags, tb->cflags, lookup_symbol(pc));
288 
289         if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
290             FILE *logfile = qemu_log_trylock();
291             if (logfile) {
292                 int flags = 0;
293 
294                 if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
295                     flags |= CPU_DUMP_FPU;
296                 }
297 #if defined(TARGET_I386)
298                 flags |= CPU_DUMP_CCOP;
299 #endif
300                 if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
301                     flags |= CPU_DUMP_VPU;
302                 }
303                 cpu_dump_state(cpu, logfile, flags);
304                 qemu_log_unlock(logfile);
305             }
306         }
307     }
308 }
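
/*
 * The CPU_LOG_EXEC line produced above looks roughly like this (values are
 * made up for illustration):
 *
 *     Trace 0: 0x7f2a4c000100 [00000000/0000000000401000/000000b0/ff020000] main
 *
 * i.e. cpu index, host pointer to the translated code, then
 * cs_base/pc/flags/cflags and the nearest guest symbol.
 */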
309 
310 static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
311                                        uint32_t *cflags)
312 {
313     CPUBreakpoint *bp;
314     bool match_page = false;
315 
316     /*
317      * Singlestep overrides breakpoints.
318      * This requirement is visible in the record-replay tests, where
319      * we would fail to make forward progress in reverse-continue.
320      *
321      * TODO: gdb singlestep should only override gdb breakpoints,
322      * so that one could (gdb) singlestep into the guest kernel's
323      * architectural breakpoint handler.
324      */
325     if (cpu->singlestep_enabled) {
326         return false;
327     }
328 
329     QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
330         /*
331          * If we have an exact pc match, trigger the breakpoint.
332          * Otherwise, note matches within the page.
333          */
334         if (pc == bp->pc) {
335             bool match_bp = false;
336 
337             if (bp->flags & BP_GDB) {
338                 match_bp = true;
339             } else if (bp->flags & BP_CPU) {
340 #ifdef CONFIG_USER_ONLY
341                 g_assert_not_reached();
342 #else
343                 const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
344                 assert(tcg_ops->debug_check_breakpoint);
345                 match_bp = tcg_ops->debug_check_breakpoint(cpu);
346 #endif
347             }
348 
349             if (match_bp) {
350                 cpu->exception_index = EXCP_DEBUG;
351                 return true;
352             }
353         } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
354             match_page = true;
355         }
356     }
357 
358     /*
359      * Within the same page as a breakpoint, single-step,
360      * returning to helper_lookup_tb_ptr after each insn looking
361      * for the actual breakpoint.
362      *
363      * TODO: Perhaps better to record all of the TBs associated
364      * with a given virtual page that contains a breakpoint, and
365      * then invalidate them when a new overlapping breakpoint is
366      * set on the page.  Non-overlapping TBs would not be
367      * invalidated, nor would any TB need to be invalidated as
368      * breakpoints are removed.
369      */
370     if (match_page) {
371         *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
372     }
373     return false;
374 }
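
/*
 * Worked example of the page-match test above, assuming 4 KiB pages
 * (TARGET_PAGE_MASK == ~0xfff): pc = 0x40001234 and bp->pc = 0x40001ffc give
 * (pc ^ bp->pc) & TARGET_PAGE_MASK == 0, so they share a page and we fall
 * back to single-insn TBs; bp->pc = 0x40002000 would not match.
 */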
375 
376 static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
377                                          uint32_t *cflags)
378 {
379     return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
380         check_for_breakpoints_slow(cpu, pc, cflags);
381 }
382 
383 /**
384  * helper_lookup_tb_ptr: quick check for next tb
385  * @env: current cpu state
386  *
387  * Look for an existing TB matching the current cpu state.
388  * If found, return the code pointer.  If not found, return
389  * the tcg epilogue so that we return into cpu_tb_exec.
390  */
391 const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
392 {
393     CPUState *cpu = env_cpu(env);
394     TranslationBlock *tb;
395     vaddr pc;
396     uint64_t cs_base;
397     uint32_t flags, cflags;
398 
399     /*
400      * By definition we've just finished a TB, so I/O is OK.
401      * Avoid the possibility of calling cpu_io_recompile() if
402      * a page table walk triggered by tb_lookup() calling
403      * probe_access_internal() happens to touch an MMIO device.
404      * The next TB, if we chain to it, will clear the flag again.
405      */
406     cpu->neg.can_do_io = true;
407     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
408 
409     cflags = curr_cflags(cpu);
410     if (check_for_breakpoints(cpu, pc, &cflags)) {
411         cpu_loop_exit(cpu);
412     }
413 
414     tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
415     if (tb == NULL) {
416         return tcg_code_gen_epilogue;
417     }
418 
419     if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
420         log_cpu_exec(pc, cpu, tb);
421     }
422 
423     return tb->tc.ptr;
424 }
425 
426 /* Execute a TB, and fix up the CPU state afterwards if necessary */
427 /*
428  * Disable CFI checks.
429  * TCG creates binary blobs at runtime, with the transformed code.
430  * A TB is a blob of binary code, created at runtime and called with an
431  * indirect function call. Since such a function did not exist at compile time,
432  * the CFI runtime has no way to verify its signature and would fail.
433  * TCG is not considered a security-sensitive part of QEMU, so this does not
434  * affect the impact of CFI in environments with high security requirements.
435  */
436 static inline TranslationBlock * QEMU_DISABLE_CFI
437 cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
438 {
439     uintptr_t ret;
440     TranslationBlock *last_tb;
441     const void *tb_ptr = itb->tc.ptr;
442 
443     if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
444         log_cpu_exec(log_pc(cpu, itb), cpu, itb);
445     }
446 
447     qemu_thread_jit_execute();
448     ret = tcg_qemu_tb_exec(cpu_env(cpu), tb_ptr);
449     cpu->neg.can_do_io = true;
450     qemu_plugin_disable_mem_helpers(cpu);
451     /*
452      * TODO: Delay swapping back to the read-write region of the TB
453      * until we actually need to modify the TB.  The read-only copy,
454      * coming from the rx region, shares the same host TLB entry as
455      * the code that executed the exit_tb opcode that arrived here.
456      * If we insist on touching both the RX and the RW pages, we
457      * double the host TLB pressure.
458      */
459     last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
460     *tb_exit = ret & TB_EXIT_MASK;
461 
462     trace_exec_tb_exit(last_tb, *tb_exit);
463 
464     if (*tb_exit > TB_EXIT_IDX1) {
465         /* We didn't start executing this TB (e.g. because the instruction
466          * counter hit zero); we must restore the guest PC to the address
467          * of the start of the TB.
468          */
469         CPUClass *cc = cpu->cc;
470         const TCGCPUOps *tcg_ops = cc->tcg_ops;
471 
472         if (tcg_ops->synchronize_from_tb) {
473             tcg_ops->synchronize_from_tb(cpu, last_tb);
474         } else {
475             tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
476             assert(cc->set_pc);
477             cc->set_pc(cpu, last_tb->pc);
478         }
479         if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
480             vaddr pc = log_pc(cpu, last_tb);
481             if (qemu_log_in_addr_range(pc)) {
482                 qemu_log("Stopped execution of TB chain before %p [%016"
483                          VADDR_PRIx "] %s\n",
484                          last_tb->tc.ptr, pc, lookup_symbol(pc));
485             }
486         }
487     }
488 
489     /*
490      * If gdb single-step, and we haven't raised another exception,
491      * raise a debug exception.  Single-step with another exception
492      * is handled in cpu_handle_exception.
493      */
494     if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
495         cpu->exception_index = EXCP_DEBUG;
496         cpu_loop_exit(cpu);
497     }
498 
499     return last_tb;
500 }
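
/*
 * A sketch of how the tcg_qemu_tb_exec() return value is unpacked above: the
 * low TB_EXIT_MASK bits carry the exit index and the remaining bits identify
 * the TB that was executing when the exit happened (converted back to the
 * writable view for use here):
 *
 *     last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
 *     tb_exit = ret & TB_EXIT_MASK;
 *
 * TB_EXIT_IDX0/TB_EXIT_IDX1 mean we left through a goto_tb slot; anything
 * greater means we did not actually start executing that TB.
 */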
501 
502 
503 static void cpu_exec_enter(CPUState *cpu)
504 {
505     const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
506 
507     if (tcg_ops->cpu_exec_enter) {
508         tcg_ops->cpu_exec_enter(cpu);
509     }
510 }
511 
512 static void cpu_exec_exit(CPUState *cpu)
513 {
514     const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
515 
516     if (tcg_ops->cpu_exec_exit) {
517         tcg_ops->cpu_exec_exit(cpu);
518     }
519 }
520 
521 static void cpu_exec_longjmp_cleanup(CPUState *cpu)
522 {
523     /* Non-buggy compilers preserve this; assert the correct value. */
524     g_assert(cpu == current_cpu);
525 
526 #ifdef CONFIG_USER_ONLY
527     clear_helper_retaddr();
528     if (have_mmap_lock()) {
529         mmap_unlock();
530     }
531 #else
532     /*
533      * For softmmu, a tlb_fill fault during translation will land here,
534      * and we need to release any page locks held.  In system mode we
535      * have one tcg_ctx per thread, so we know it was this cpu doing
536      * the translation.
537      *
538      * Alternative 1: Install a cleanup to be called via an exception
539      * handling safe longjmp.  It seems plausible that all our hosts
540      * support such a thing.  We'd have to properly register unwind info
541      * for the JIT for EH, rather than just for GDB.
542      *
543      * Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
544      * capture the cpu_loop_exit longjmp, perform the cleanup, and
545      * jump again to arrive here.
546      */
547     if (tcg_ctx->gen_tb) {
548         tb_unlock_pages(tcg_ctx->gen_tb);
549         tcg_ctx->gen_tb = NULL;
550     }
551 #endif
552     if (bql_locked()) {
553         bql_unlock();
554     }
555     assert_no_pages_locked();
556 }
557 
558 void cpu_exec_step_atomic(CPUState *cpu)
559 {
560     CPUArchState *env = cpu_env(cpu);
561     TranslationBlock *tb;
562     vaddr pc;
563     uint64_t cs_base;
564     uint32_t flags, cflags;
565     int tb_exit;
566 
567     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
568         start_exclusive();
569         g_assert(cpu == current_cpu);
570         g_assert(!cpu->running);
571         cpu->running = true;
572 
573         cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
574 
575         cflags = curr_cflags(cpu);
576         /* Execute in a serial context. */
577         cflags &= ~CF_PARALLEL;
578         /* After 1 insn, return and release the exclusive lock. */
579         cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
580         /*
581          * No need to check_for_breakpoints here.
582          * We only arrive in cpu_exec_step_atomic after beginning execution
583          * of an insn that includes an atomic operation we can't handle.
584          * Any breakpoint for this insn will have been recognized earlier.
585          */
586 
587         tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
588         if (tb == NULL) {
589             mmap_lock();
590             tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
591             mmap_unlock();
592         }
593 
594         cpu_exec_enter(cpu);
595         /* execute the generated code */
596         trace_exec_tb(tb, pc);
597         cpu_tb_exec(cpu, tb, &tb_exit);
598         cpu_exec_exit(cpu);
599     } else {
600         cpu_exec_longjmp_cleanup(cpu);
601     }
602 
603     /*
604      * As we start the exclusive region before codegen, we must still
605      * be in the region if we longjmp out of either the codegen or
606      * the execution.
607      */
608     g_assert(cpu_in_exclusive_context(cpu));
609     cpu->running = false;
610     end_exclusive();
611 }
612 
613 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
614 {
615     /*
616      * Get the rx view of the structure, from which we find the
617      * executable code address, and tb_target_set_jmp_target can
618      * produce a pc-relative displacement to jmp_target_addr[n].
619      */
620     const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
621     uintptr_t offset = tb->jmp_insn_offset[n];
622     uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
623     uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
624 
625     tb->jmp_target_addr[n] = addr;
626     tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
627 }
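
/*
 * Note on the split-wx arithmetic above: the generated code is mapped twice,
 * once read-execute (rx) and once read-write (rw), at a constant offset of
 * tcg_splitwx_diff (zero when split-wx is not in use, so the two views
 * coincide).  The patch site is located in the rx view via tc.ptr and
 * translated to the rw view for writing:
 *
 *     jmp_rx = (uintptr_t)tb->tc.ptr + tb->jmp_insn_offset[n];
 *     jmp_rw = jmp_rx - tcg_splitwx_diff;
 */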
628 
629 static inline void tb_add_jump(TranslationBlock *tb, int n,
630                                TranslationBlock *tb_next)
631 {
632     uintptr_t old;
633 
634     qemu_thread_jit_write();
635     assert(n < ARRAY_SIZE(tb->jmp_list_next));
636     qemu_spin_lock(&tb_next->jmp_lock);
637 
638     /* make sure the destination TB is valid */
639     if (tb_next->cflags & CF_INVALID) {
640         goto out_unlock_next;
641     }
642     /* Atomically claim the jump destination slot only if it was NULL */
643     old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
644                           (uintptr_t)tb_next);
645     if (old) {
646         goto out_unlock_next;
647     }
648 
649     /* patch the native jump address */
650     tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);
651 
652     /* add in TB jmp list */
653     tb->jmp_list_next[n] = tb_next->jmp_list_head;
654     tb_next->jmp_list_head = (uintptr_t)tb | n;
655 
656     qemu_spin_unlock(&tb_next->jmp_lock);
657 
658     qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
659                   tb->tc.ptr, n, tb_next->tc.ptr);
660     return;
661 
662  out_unlock_next:
663     qemu_spin_unlock(&tb_next->jmp_lock);
664     return;
665 }
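
/*
 * Bookkeeping note for the list manipulation above: jmp_list_head and
 * jmp_list_next store a TranslationBlock pointer with the outgoing jump
 * index OR-ed into its low bits (TBs are sufficiently aligned for the two
 * to coexist), so that invalidating tb_next can later walk the list and
 * unpatch every TB that was chained to it.
 */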
666 
667 static inline bool cpu_handle_halt(CPUState *cpu)
668 {
669 #ifndef CONFIG_USER_ONLY
670     if (cpu->halted) {
671         const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
672 
673         if (tcg_ops->cpu_exec_halt) {
674             tcg_ops->cpu_exec_halt(cpu);
675         }
676         if (!cpu_has_work(cpu)) {
677             return true;
678         }
679 
680         cpu->halted = 0;
681     }
682 #endif /* !CONFIG_USER_ONLY */
683 
684     return false;
685 }
686 
687 static inline void cpu_handle_debug_exception(CPUState *cpu)
688 {
689     const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
690     CPUWatchpoint *wp;
691 
692     if (!cpu->watchpoint_hit) {
693         QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
694             wp->flags &= ~BP_WATCHPOINT_HIT;
695         }
696     }
697 
698     if (tcg_ops->debug_excp_handler) {
699         tcg_ops->debug_excp_handler(cpu);
700     }
701 }
702 
703 static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
704 {
705     if (cpu->exception_index < 0) {
706 #ifndef CONFIG_USER_ONLY
707         if (replay_has_exception()
708             && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
709             /* Execute just one insn to trigger exception pending in the log */
710             cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
711                 | CF_NOIRQ | 1;
712         }
713 #endif
714         return false;
715     }
716 
717     if (cpu->exception_index >= EXCP_INTERRUPT) {
718         /* exit request from the cpu execution loop */
719         *ret = cpu->exception_index;
720         if (*ret == EXCP_DEBUG) {
721             cpu_handle_debug_exception(cpu);
722         }
723         cpu->exception_index = -1;
724         return true;
725     }
726 
727 #if defined(CONFIG_USER_ONLY)
728     /*
729      * In user-mode emulation, we simulate a fake exception which will be
730      * handled outside the cpu execution loop.
731      */
732 #if defined(TARGET_I386)
733     const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
734     tcg_ops->fake_user_interrupt(cpu);
735 #endif /* TARGET_I386 */
736     *ret = cpu->exception_index;
737     cpu->exception_index = -1;
738     return true;
739 #else
740     if (replay_exception()) {
741         const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
742 
743         bql_lock();
744         tcg_ops->do_interrupt(cpu);
745         bql_unlock();
746         cpu->exception_index = -1;
747 
748         if (unlikely(cpu->singlestep_enabled)) {
749             /*
750              * After processing the exception, ensure an EXCP_DEBUG is
751              * raised when single-stepping so that GDB doesn't miss the
752              * next instruction.
753              */
754             *ret = EXCP_DEBUG;
755             cpu_handle_debug_exception(cpu);
756             return true;
757         }
758     } else if (!replay_has_interrupt()) {
759         /* give the iothread a chance to run in replay mode */
760         *ret = EXCP_INTERRUPT;
761         return true;
762     }
763 #endif
764 
765     return false;
766 }
767 
768 static inline bool icount_exit_request(CPUState *cpu)
769 {
770     if (!icount_enabled()) {
771         return false;
772     }
773     if (cpu->cflags_next_tb != -1 && !(cpu->cflags_next_tb & CF_USE_ICOUNT)) {
774         return false;
775     }
776     return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
777 }
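
/*
 * For reference: cpu->neg.icount_decr.u16.low is the 16-bit chunk of the
 * instruction budget that the generated code decrements, and
 * cpu->icount_extra holds whatever part of cpu->icount_budget did not fit
 * into that chunk; when their sum reaches zero the budget for this
 * execution slice is exhausted and we must return to the main loop.
 */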
778 
779 static inline bool cpu_handle_interrupt(CPUState *cpu,
780                                         TranslationBlock **last_tb)
781 {
782     /*
783      * If we have requested custom cflags with CF_NOIRQ we should
784      * skip checking here. Any pending interrupts will get picked up
785      * by the next TB we execute under normal cflags.
786      */
787     if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
788         return false;
789     }
790 
791     /* Clear the interrupt flag now since we're processing
792      * cpu->interrupt_request and cpu->exit_request.
793      * Ensure zeroing happens before reading cpu->exit_request or
794      * cpu->interrupt_request (see also smp_wmb in cpu_exit())
795      */
796     qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
797 
798     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
799         int interrupt_request;
800         bql_lock();
801         interrupt_request = cpu->interrupt_request;
802         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
803             /* Mask out external interrupts for this step. */
804             interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
805         }
806         if (interrupt_request & CPU_INTERRUPT_DEBUG) {
807             cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
808             cpu->exception_index = EXCP_DEBUG;
809             bql_unlock();
810             return true;
811         }
812 #if !defined(CONFIG_USER_ONLY)
813         if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
814             /* Do nothing */
815         } else if (interrupt_request & CPU_INTERRUPT_HALT) {
816             replay_interrupt();
817             cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
818             cpu->halted = 1;
819             cpu->exception_index = EXCP_HLT;
820             bql_unlock();
821             return true;
822         }
823 #if defined(TARGET_I386)
824         else if (interrupt_request & CPU_INTERRUPT_INIT) {
825             X86CPU *x86_cpu = X86_CPU(cpu);
826             CPUArchState *env = &x86_cpu->env;
827             replay_interrupt();
828             cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
829             do_cpu_init(x86_cpu);
830             cpu->exception_index = EXCP_HALTED;
831             bql_unlock();
832             return true;
833         }
834 #else
835         else if (interrupt_request & CPU_INTERRUPT_RESET) {
836             replay_interrupt();
837             cpu_reset(cpu);
838             bql_unlock();
839             return true;
840         }
841 #endif /* !TARGET_I386 */
842         /* The target hook has 3 exit conditions:
843            False when the interrupt isn't processed,
844            True when it is, and we should restart on a new TB,
845            and a longjmp out through cpu_loop_exit.  */
846         else {
847             const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
848 
849             if (tcg_ops->cpu_exec_interrupt &&
850                 tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
851                 if (!tcg_ops->need_replay_interrupt ||
852                     tcg_ops->need_replay_interrupt(interrupt_request)) {
853                     replay_interrupt();
854                 }
855                 /*
856                  * After processing the interrupt, ensure an EXCP_DEBUG is
857                  * raised when single-stepping so that GDB doesn't miss the
858                  * next instruction.
859                  */
860                 if (unlikely(cpu->singlestep_enabled)) {
861                     cpu->exception_index = EXCP_DEBUG;
862                     bql_unlock();
863                     return true;
864                 }
865                 cpu->exception_index = -1;
866                 *last_tb = NULL;
867             }
868             /* The target hook may have updated cpu->interrupt_request;
869              * reload the local 'interrupt_request' value. */
870             interrupt_request = cpu->interrupt_request;
871         }
872 #endif /* !CONFIG_USER_ONLY */
873         if (interrupt_request & CPU_INTERRUPT_EXITTB) {
874             cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
875             /* Ensure that no TB jump will be modified, as
876                the program flow was changed. */
877             *last_tb = NULL;
878         }
879 
880         /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
881         bql_unlock();
882     }
883 
884     /* Finally, check if we need to exit to the main loop.  */
885     if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) {
886         qatomic_set(&cpu->exit_request, 0);
887         if (cpu->exception_index == -1) {
888             cpu->exception_index = EXCP_INTERRUPT;
889         }
890         return true;
891     }
892 
893     return false;
894 }
895 
896 static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
897                                     vaddr pc, TranslationBlock **last_tb,
898                                     int *tb_exit)
899 {
900     int32_t insns_left;
901 
902     trace_exec_tb(tb, pc);
903     tb = cpu_tb_exec(cpu, tb, tb_exit);
904     if (*tb_exit != TB_EXIT_REQUESTED) {
905         *last_tb = tb;
906         return;
907     }
908 
909     *last_tb = NULL;
910     insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
911     if (insns_left < 0) {
912         /* Something asked us to stop executing chained TBs; just
913          * continue round the main loop. Whatever requested the exit
914          * will also have set something else (eg exit_request or
915          * interrupt_request) which will be handled by
916          * cpu_handle_interrupt.  cpu_handle_interrupt will also
917          * clear cpu->icount_decr.u16.high.
918          */
919         return;
920     }
921 
922     /* Instruction counter expired.  */
923     assert(icount_enabled());
924 #ifndef CONFIG_USER_ONLY
925     /* Ensure global icount has gone forward */
926     icount_update(cpu);
927     /* Refill decrementer and continue execution.  */
928     insns_left = MIN(0xffff, cpu->icount_budget);
929     cpu->neg.icount_decr.u16.low = insns_left;
930     cpu->icount_extra = cpu->icount_budget - insns_left;
931 
932     /*
933      * If the next tb has more instructions than we have left to
934      * execute we need to ensure we find/generate a TB with exactly
935      * insns_left instructions in it.
936      */
937     if (insns_left > 0 && insns_left < tb->icount)  {
938         assert(insns_left <= CF_COUNT_MASK);
939         assert(cpu->icount_extra == 0);
940         cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
941     }
942 #endif
943 }
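
/*
 * Worked example of the refill above (numbers are illustrative): with
 * icount_budget = 0x12345 we get
 *     insns_left          = MIN(0xffff, 0x12345) = 0xffff
 *     icount_decr.u16.low = 0xffff
 *     icount_extra        = 0x12345 - 0xffff = 0x2346
 * so the 16-bit decrementer is recharged in chunks while icount_extra
 * carries the remainder of the budget.
 */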
944 
945 /* main execution loop */
946 
947 static int __attribute__((noinline))
948 cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
949 {
950     int ret;
951 
952     /* if an exception is pending, we execute it here */
953     while (!cpu_handle_exception(cpu, &ret)) {
954         TranslationBlock *last_tb = NULL;
955         int tb_exit = 0;
956 
957         while (!cpu_handle_interrupt(cpu, &last_tb)) {
958             TranslationBlock *tb;
959             vaddr pc;
960             uint64_t cs_base;
961             uint32_t flags, cflags;
962 
963             cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);
964 
965             /*
966              * When requested, use an exact setting for cflags for the next
967              * execution.  This is used for icount, precise smc, and stop-
968              * after-access watchpoints.  Since this request should never
969              * have CF_INVALID set, -1 is a convenient invalid value that
970              * does not require tcg headers for cpu_common_reset.
971              */
972             cflags = cpu->cflags_next_tb;
973             if (cflags == -1) {
974                 cflags = curr_cflags(cpu);
975             } else {
976                 cpu->cflags_next_tb = -1;
977             }
978 
979             if (check_for_breakpoints(cpu, pc, &cflags)) {
980                 break;
981             }
982 
983             tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
984             if (tb == NULL) {
985                 CPUJumpCache *jc;
986                 uint32_t h;
987 
988                 mmap_lock();
989                 tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
990                 mmap_unlock();
991 
992                 /*
993                  * Add the TB to the per-CPU jump cache, hashed on
994                  * virtual PC, for fast lookup.
995                  */
996                 h = tb_jmp_cache_hash_func(pc);
997                 jc = cpu->tb_jmp_cache;
998                 jc->array[h].pc = pc;
999                 qatomic_set(&jc->array[h].tb, tb);
1000             }
1001 
1002 #ifndef CONFIG_USER_ONLY
1003             /*
1004              * We don't take care of direct jumps when address mapping
1005              * changes in system emulation.  So it's not safe to make a
1006              * direct jump to a TB spanning two pages because the mapping
1007              * for the second page can change.
1008              */
1009             if (tb_page_addr1(tb) != -1) {
1010                 last_tb = NULL;
1011             }
1012 #endif
1013             /* See if we can patch the calling TB. */
1014             if (last_tb) {
1015                 tb_add_jump(last_tb, tb_exit, tb);
1016             }
1017 
1018             cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
1019 
1020             /* Try to align the host and virtual clocks
1021                if the guest is ahead of real time. */
1022             align_clocks(sc, cpu);
1023         }
1024     }
1025     return ret;
1026 }
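
/*
 * To summarise the structure: cpu_exec_loop() is the body that runs inside
 * the sigsetjmp() protection installed by cpu_exec_setjmp() below.  The
 * outer loop drains pending exceptions via cpu_handle_exception(); the inner
 * loop checks for interrupts, finds or generates the next TB, optionally
 * chains it to the previous one, and executes it until something forces a
 * return to the main loop.
 */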
1027 
1028 static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
1029 {
1030     /* Prepare setjmp context for exception handling. */
1031     if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
1032         cpu_exec_longjmp_cleanup(cpu);
1033     }
1034 
1035     return cpu_exec_loop(cpu, sc);
1036 }
1037 
1038 int cpu_exec(CPUState *cpu)
1039 {
1040     int ret;
1041     SyncClocks sc = { 0 };
1042 
1043     /* replay_interrupt may need current_cpu */
1044     current_cpu = cpu;
1045 
1046     if (cpu_handle_halt(cpu)) {
1047         return EXCP_HALTED;
1048     }
1049 
1050     RCU_READ_LOCK_GUARD();
1051     cpu_exec_enter(cpu);
1052 
1053     /*
1054      * Calculate difference between guest clock and host clock.
1055      * This delay includes the delay of the last cycle, so
1056      * what we have to do is sleep until it is 0. As for the
1057      * advance/delay we gain here, we try to fix it next time.
1058      */
1059     init_delay_params(&sc, cpu);
1060 
1061     ret = cpu_exec_setjmp(cpu, &sc);
1062 
1063     cpu_exec_exit(cpu);
1064     return ret;
1065 }
1066 
1067 bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
1068 {
1069     static bool tcg_target_initialized;
1070 
1071     if (!tcg_target_initialized) {
1072         cpu->cc->tcg_ops->initialize();
1073         tcg_target_initialized = true;
1074     }
1075 
1076     cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
1077     tlb_init(cpu);
1078 #ifndef CONFIG_USER_ONLY
1079     tcg_iommu_init_notifier_list(cpu);
1080 #endif /* !CONFIG_USER_ONLY */
1081     /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
1082 
1083     return true;
1084 }
1085 
1086 /* undo the initializations in reverse order */
1087 void tcg_exec_unrealizefn(CPUState *cpu)
1088 {
1089 #ifndef CONFIG_USER_ONLY
1090     tcg_iommu_free_notifier_list(cpu);
1091 #endif /* !CONFIG_USER_ONLY */
1092 
1093     tlb_destroy(cpu);
1094     g_free_rcu(cpu->tb_jmp_cache, rcu);
1095 }
1096