xref: /qemu/accel/tcg/cpu-exec.c (revision 7dd8f6fd)
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

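/*
 * Track the drift between the guest's virtual clock and the host's
 * realtime clock, so that -icount align can sleep the vCPU thread
 * when the guest runs ahead of real time.
 */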
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

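/*
 * If -icount align is enabled, compute how far the guest clock has run
 * ahead of the host clock since the last call and, when the advance
 * exceeds VM_CLOCK_ADVANCE, sleep the thread so real time can catch up.
 */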
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

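/*
 * Warn (at most MAX_NB_PRINTS times, rate-limited by MAX_DELAY_PRINT_RATE)
 * when the guest falls noticeably behind real time while -icount align
 * is in use.
 */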
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

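/*
 * Capture the initial difference between the guest and host clocks for
 * -icount align, and update the global max_delay/max_advance statistics
 * before entering the execution loop.
 */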
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print at most every 2s if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        FILE *logfile = qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock(logfile);
    }
#endif /* DEBUG_DISAS */

    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif

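/*
 * Execute a single guest instruction inside an exclusive region.  This is
 * the slow path used for atomic/exclusive operations when multiple vCPU
 * threads are running: parallel_cpus is cleared so the generated code does
 * not need host atomic instructions.
 */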
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();

        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    parallel_cpus = true;
    end_exclusive();
}

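/* Search key used by tb_htable_lookup() and tb_lookup_cmp() below. */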
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

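/*
 * qht comparison function: return true if the candidate TB matches the
 * tb_desc key, including the physical address of a possible second page.
 */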
static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

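/*
 * Look up a TB in the global, physically hashed translation table.
 * Returns NULL if no matching translation exists (or if the code page
 * is not currently mapped).
 */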
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

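/*
 * Set the destination of jump slot @n of @tb, either by patching the
 * generated code directly or by storing the target address for an
 * indirect jump, depending on the TCG backend.
 */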
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

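/*
 * Chain jump slot @n of @tb to @tb_next, provided the destination is still
 * valid and the slot has not already been claimed by another thread.
 */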
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

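/*
 * Find (or translate) the TB for the current CPU state and, when possible,
 * chain it to the TB we are coming from so future executions skip the
 * lookup.
 */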
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* Add the TB to the virtual-PC hash table for fast lookup. */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

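/*
 * Return true if the CPU should remain halted, i.e. it is halted and has
 * no work pending; otherwise clear the halted state and keep executing.
 */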
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

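/*
 * If the debug exception was not caused by a watchpoint, clear any leftover
 * watchpoint hit flags, then invoke the target's debug exception handler.
 */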
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

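/*
 * Process a pending exception, if any.  Returns true (with *ret set) when
 * the execution loop should stop and return to the caller, false to keep
 * executing TBs.
 */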
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

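/*
 * Service pending interrupt and exit requests.  Returns true when the
 * execution loop should return to the main loop, false to keep executing
 * TBs; *last_tb is cleared whenever chaining to the previous TB is no
 * longer valid.
 */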
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and it may also exit via longjmp through cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(atomic_read(&cpu->exit_request))
        || (use_icount
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

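/*
 * Execute one TB (and any TBs chained to it) and record where execution
 * stopped.  When the icount budget runs out, refill the decrementer or
 * execute the remaining instructions with a throw-away TB.
 */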
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is ahead of real time */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}