/* xref: /qemu/accel/tcg/cpu-exec.c (revision 43692239) */
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/qemu-print.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/compiler.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
#include "internal.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to run ahead of real time by at most 3 ms.
 * The difference between the two clocks therefore oscillates
 * around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static int64_t max_delay;
static int64_t max_advance;

static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
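
/*
 * Worked example (illustrative, not from the original source; assumes a
 * fixed icount shift of 4): if the vCPU has retired 1000 more guest
 * instructions since the last call, icount_to_ns() credits
 * 1000 << 4 = 16000 ns of virtual time to diff_clk.  Once diff_clk
 * exceeds VM_CLOCK_ADVANCE (3 ms), align_clocks() sleeps the surplus
 * away, so the guest/host clock difference oscillates around zero.
 */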

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print at most every 2 s if the guest is late.  We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call.  Since such a function did not exist at compile
 * time, the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU, so this does
 * not affect the impact of CFI in environments with high security
 * requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        FILE *logfile = qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock(logfile);
    }
#endif /* DEBUG_DISAS */

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return last_tb;
}
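
/*
 * Illustrative sketch (not part of the original file): with split-wx the
 * translation cache is mapped twice, once executable (RX) and once
 * writable (RW), a constant tcg_splitwx_diff apart.  Converting between
 * the two views is plain pointer arithmetic, which is all that
 * tcg_splitwx_to_rw() above and tb_set_jmp_target() below rely on:
 *
 *     rw_view = (char *)rx_view - tcg_splitwx_diff;
 *     rx_view = (char *)rw_view + tcg_splitwx_diff;
 */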


static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}

void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
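    /*
     * The low bits of cflags carry the instruction count, so "| 1"
     * requests a single-instruction TB; clearing CF_PARALLEL lets the
     * generated code use plain, non-atomic host operations, which is
     * safe because we run inside the exclusive region.
     */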
    uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);

        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen, we must still
     * be in the region if we longjmp out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}
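
/*
 * Illustrative sketch (a restatement, not additional code): the
 * exclusive-section protocol used above is
 *
 *     start_exclusive();      wait for all other vCPUs to pause
 *     cpu->running = true;
 *     ...run exactly one instruction, non-atomically...
 *     cpu->running = false;
 *     end_exclusive();        let the other vCPUs resume
 *
 * so the "atomic" instruction executes alone in the whole machine.
 */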

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cflags;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
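
/*
 * Illustrative sketch (simplified from the tb_lookup() fast path in
 * exec/tb-lookup.h, not a verbatim copy): callers probe the per-vCPU
 * tb_jmp_cache first and fall back to the qht lookup above only on a
 * miss, refilling the cache on success:
 *
 *     hash = tb_jmp_cache_hash_func(pc);
 *     tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
 *     if (!tb || tb->pc != pc || tb->cs_base != cs_base ||
 *         tb->flags != flags || tb_cflags(tb) != cflags) {
 *         tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
 *         if (tb) {
 *             qatomic_set(&cpu->tb_jmp_cache[hash], tb);
 *         }
 *     }
 */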

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        uintptr_t jmp_rx = tc_ptr + offset;
        uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
        tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}
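
/*
 * Illustrative sketch of the claim-once idiom above: only the thread
 * whose cmpxchg observes NULL wins the slot and may patch the jump;
 * every other thread sees a non-NULL old value and backs off:
 *
 *     if (qatomic_cmpxchg(&slot, 0, value) == 0) {
 *         ...winner: safe to patch...
 *     }
 */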

static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cflags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
        mmap_unlock();
        /* Insert the TB into the virtual-PC hash table for fast lookup */
        qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't handle direct jumps when the address mapping changes in
     * system emulation, so it's not safe to link directly to a TB
     * spanning two pages: the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1;
        }
#endif
        return false;
    }
    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* In user-mode emulation we raise a fake exception that is
           handled outside the CPU execution loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tcg_ops->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->tcg_ops->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later. It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}

563                                         TranslationBlock **last_tb)
564 {
565     CPUClass *cc = CPU_GET_CLASS(cpu);
566 
567     /* Clear the interrupt flag now since we're processing
568      * cpu->interrupt_request and cpu->exit_request.
569      * Ensure zeroing happens before reading cpu->exit_request or
570      * cpu->interrupt_request (see also smp_wmb in cpu_exit())
571      */
572     qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
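
    /*
     * Illustrative pairing (simplified; the store side lives in
     * cpu_exit()):
     *
     *     this function                     cpu_exit()
     *     ----------------------------      ------------------------------
     *     icount_decr.u16.high = 0; (mb)    exit_request = true;
     *     read interrupt_request /          smp_wmb();
     *          exit_request                 icount_decr.u16.high = -1;
     *
     * If the read below misses a freshly set exit_request, the writer's
     * -1 store to icount_decr necessarily lands after our zeroing and
     * forces the next TB to exit.
     */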

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has three exit conditions: false when the
           interrupt isn't processed; true when it is, in which case we
           restart on a new TB; or a longjmp out via cpu_loop_exit().  */
        else {
            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                cpu->exception_index =
                    (cpu->singlestep_enabled ? EXCP_DEBUG : -1);
                *last_tb = NULL;
            }
            /* The target hook may have updated cpu->interrupt_request;
             * reload the local interrupt_request value. */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop.  */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (e.g. exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(CF_COUNT_MASK, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next TB has more instructions than we have left to
     * execute, we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (!cpu->icount_extra && insns_left > 0 && insns_left < tb->icount) {
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}
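
/*
 * Worked example (illustrative; assumes CF_COUNT_MASK == 0x7fff): with
 * an icount_budget of 40000 instructions, the decrementer above is
 * refilled with MIN(0x7fff, 40000) = 32767 and icount_extra keeps the
 * remaining 7233.  Only once icount_extra is exhausted and fewer than
 * tb->icount instructions remain does cflags_next_tb request a TB of
 * exactly insns_left instructions.
 */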

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cpu_exec_enter(cpu);

    /* Calculate the difference between the guest and host clocks.
     * This delay includes the delay of the last cycle, so
     * all we have to do is sleep until it reaches 0.  Any advance
     * or delay we gain here is corrected on the next iteration.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__)
        /*
         * Some compilers wrongly smash all local variables after
         * siglongjmp (the spec requires that only non-volatile locals
         * which are changed between the sigsetjmp and siglongjmp are
         * permitted to be trashed). There were bug reports for gcc
         * 4.5.0 and clang.  The bug is fixed in all versions of gcc
         * that we support, but is still unfixed in clang:
         *   https://bugs.llvm.org/show_bug.cgi?id=21183
         *
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered),
         * so we only perform the workaround for clang.
         */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else
        /*
         * Non-buggy compilers preserve these locals; assert that
         * they have the correct value.
         */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif

#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise SMC (self-
               modifying code), and stop-after-access watchpoints.  Since
               this request should never have CF_INVALID set, -1 is a
               convenient invalid value that does not require tcg headers
               for cpu_common_reset.  */
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is ahead */
            align_clocks(&sc, cpu);
        }
    }

    cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}

void tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    static bool tcg_target_initialized;
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!tcg_target_initialized) {
        cc->tcg_ops->initialize();
        tcg_target_initialized = true;
    }
    tlb_init(cpu);
    qemu_plugin_vcpu_init_hook(cpu);

#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
}

/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    qemu_plugin_vcpu_exit_hook(cpu);
    tlb_destroy(cpu);
}

#ifndef CONFIG_USER_ONLY

void dump_drift_info(void)
{
    if (!icount_enabled()) {
        return;
    }

    qemu_printf("Host - Guest clock  %"PRIi64" ms\n",
                (cpu_get_clock() - icount_get()) / SCALE_MS);
    if (icount_align_option) {
        qemu_printf("Max guest delay     %"PRIi64" ms\n",
                    -max_delay / SCALE_MS);
        qemu_printf("Max guest advance   %"PRIi64" ms\n",
                    max_advance / SCALE_MS);
    } else {
        qemu_printf("Max guest delay     NA\n");
        qemu_printf("Max guest advance   NA\n");
    }
}

#endif /* !CONFIG_USER_ONLY */