1 /* 2 * emulator main execution loop 3 * 4 * Copyright (c) 2003-2005 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qapi/error.h" 23 #include "qapi/qapi-commands-machine.h" 24 #include "qapi/type-helpers.h" 25 #include "hw/core/tcg-cpu-ops.h" 26 #include "trace.h" 27 #include "disas/disas.h" 28 #include "exec/exec-all.h" 29 #include "tcg/tcg.h" 30 #include "qemu/atomic.h" 31 #include "qemu/compiler.h" 32 #include "qemu/timer.h" 33 #include "qemu/rcu.h" 34 #include "exec/log.h" 35 #include "qemu/main-loop.h" 36 #if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY) 37 #include "hw/i386/apic.h" 38 #endif 39 #include "sysemu/cpus.h" 40 #include "exec/cpu-all.h" 41 #include "sysemu/cpu-timers.h" 42 #include "sysemu/replay.h" 43 #include "sysemu/tcg.h" 44 #include "exec/helper-proto.h" 45 #include "tb-hash.h" 46 #include "tb-context.h" 47 #include "internal.h" 48 49 /* -icount align implementation. */ 50 51 typedef struct SyncClocks { 52 int64_t diff_clk; 53 int64_t last_cpu_icount; 54 int64_t realtime_clock; 55 } SyncClocks; 56 57 #if !defined(CONFIG_USER_ONLY) 58 /* Allow the guest to have a max 3ms advance. 59 * The difference between the 2 clocks could therefore 60 * oscillate around 0. 61 */ 62 #define VM_CLOCK_ADVANCE 3000000 63 #define THRESHOLD_REDUCE 1.5 64 #define MAX_DELAY_PRINT_RATE 2000000000LL 65 #define MAX_NB_PRINTS 100 66 67 static int64_t max_delay; 68 static int64_t max_advance; 69 70 static void align_clocks(SyncClocks *sc, CPUState *cpu) 71 { 72 int64_t cpu_icount; 73 74 if (!icount_align_option) { 75 return; 76 } 77 78 cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low; 79 sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount); 80 sc->last_cpu_icount = cpu_icount; 81 82 if (sc->diff_clk > VM_CLOCK_ADVANCE) { 83 #ifndef _WIN32 84 struct timespec sleep_delay, rem_delay; 85 sleep_delay.tv_sec = sc->diff_clk / 1000000000LL; 86 sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL; 87 if (nanosleep(&sleep_delay, &rem_delay) < 0) { 88 sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec; 89 } else { 90 sc->diff_clk = 0; 91 } 92 #else 93 Sleep(sc->diff_clk / SCALE_MS); 94 sc->diff_clk = 0; 95 #endif 96 } 97 } 98 99 static void print_delay(const SyncClocks *sc) 100 { 101 static float threshold_delay; 102 static int64_t last_realtime_clock; 103 static int nb_prints; 104 105 if (icount_align_option && 106 sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE && 107 nb_prints < MAX_NB_PRINTS) { 108 if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) || 109 (-sc->diff_clk / (float)1000000000LL < 110 (threshold_delay - THRESHOLD_REDUCE))) { 111 threshold_delay = (-sc->diff_clk / 1000000000LL) + 1; 112 qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n", 113 threshold_delay - 1, 114 threshold_delay); 115 nb_prints++; 116 last_realtime_clock = sc->realtime_clock; 117 } 118 } 119 } 120 121 static void init_delay_params(SyncClocks *sc, CPUState *cpu) 122 { 123 if (!icount_align_option) { 124 return; 125 } 126 sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT); 127 sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock; 128 sc->last_cpu_icount 129 = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low; 130 if (sc->diff_clk < max_delay) { 131 max_delay = sc->diff_clk; 132 } 133 if (sc->diff_clk > max_advance) { 134 max_advance = sc->diff_clk; 135 } 136 137 /* Print every 2s max if the guest is late. We limit the number 138 of printed messages to NB_PRINT_MAX(currently 100) */ 139 print_delay(sc); 140 } 141 #else 142 static void align_clocks(SyncClocks *sc, const CPUState *cpu) 143 { 144 } 145 146 static void init_delay_params(SyncClocks *sc, const CPUState *cpu) 147 { 148 } 149 #endif /* CONFIG USER ONLY */ 150 151 uint32_t curr_cflags(CPUState *cpu) 152 { 153 uint32_t cflags = cpu->tcg_cflags; 154 155 /* 156 * Record gdb single-step. We should be exiting the TB by raising 157 * EXCP_DEBUG, but to simplify other tests, disable chaining too. 158 * 159 * For singlestep and -d nochain, suppress goto_tb so that 160 * we can log -d cpu,exec after every TB. 161 */ 162 if (unlikely(cpu->singlestep_enabled)) { 163 cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1; 164 } else if (singlestep) { 165 cflags |= CF_NO_GOTO_TB | 1; 166 } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { 167 cflags |= CF_NO_GOTO_TB; 168 } 169 170 return cflags; 171 } 172 173 struct tb_desc { 174 target_ulong pc; 175 target_ulong cs_base; 176 CPUArchState *env; 177 tb_page_addr_t phys_page1; 178 uint32_t flags; 179 uint32_t cflags; 180 uint32_t trace_vcpu_dstate; 181 }; 182 183 static bool tb_lookup_cmp(const void *p, const void *d) 184 { 185 const TranslationBlock *tb = p; 186 const struct tb_desc *desc = d; 187 188 if (tb->pc == desc->pc && 189 tb->page_addr[0] == desc->phys_page1 && 190 tb->cs_base == desc->cs_base && 191 tb->flags == desc->flags && 192 tb->trace_vcpu_dstate == desc->trace_vcpu_dstate && 193 tb_cflags(tb) == desc->cflags) { 194 /* check next page if needed */ 195 if (tb->page_addr[1] == -1) { 196 return true; 197 } else { 198 tb_page_addr_t phys_page2; 199 target_ulong virt_page2; 200 201 /* 202 * We know that the first page matched, and an otherwise valid TB 203 * encountered an incomplete instruction at the end of that page, 204 * therefore we know that generating a new TB from the current PC 205 * must also require reading from the next page -- even if the 206 * second pages do not match, and therefore the resulting insn 207 * is different for the new TB. Therefore any exception raised 208 * here by the faulting lookup is not premature. 209 */ 210 virt_page2 = TARGET_PAGE_ALIGN(desc->pc); 211 phys_page2 = get_page_addr_code(desc->env, virt_page2); 212 if (tb->page_addr[1] == phys_page2) { 213 return true; 214 } 215 } 216 } 217 return false; 218 } 219 220 static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc, 221 target_ulong cs_base, uint32_t flags, 222 uint32_t cflags) 223 { 224 tb_page_addr_t phys_pc; 225 struct tb_desc desc; 226 uint32_t h; 227 228 desc.env = cpu->env_ptr; 229 desc.cs_base = cs_base; 230 desc.flags = flags; 231 desc.cflags = cflags; 232 desc.trace_vcpu_dstate = *cpu->trace_dstate; 233 desc.pc = pc; 234 phys_pc = get_page_addr_code(desc.env, pc); 235 if (phys_pc == -1) { 236 return NULL; 237 } 238 desc.phys_page1 = phys_pc & TARGET_PAGE_MASK; 239 h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate); 240 return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp); 241 } 242 243 /* Might cause an exception, so have a longjmp destination ready */ 244 static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc, 245 target_ulong cs_base, 246 uint32_t flags, uint32_t cflags) 247 { 248 TranslationBlock *tb; 249 uint32_t hash; 250 251 /* we should never be trying to look up an INVALID tb */ 252 tcg_debug_assert(!(cflags & CF_INVALID)); 253 254 hash = tb_jmp_cache_hash_func(pc); 255 tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); 256 257 if (likely(tb && 258 tb->pc == pc && 259 tb->cs_base == cs_base && 260 tb->flags == flags && 261 tb->trace_vcpu_dstate == *cpu->trace_dstate && 262 tb_cflags(tb) == cflags)) { 263 return tb; 264 } 265 tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags); 266 if (tb == NULL) { 267 return NULL; 268 } 269 qatomic_set(&cpu->tb_jmp_cache[hash], tb); 270 return tb; 271 } 272 273 static inline void log_cpu_exec(target_ulong pc, CPUState *cpu, 274 const TranslationBlock *tb) 275 { 276 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) 277 && qemu_log_in_addr_range(pc)) { 278 279 qemu_log_mask(CPU_LOG_EXEC, 280 "Trace %d: %p [" TARGET_FMT_lx 281 "/" TARGET_FMT_lx "/%08x/%08x] %s\n", 282 cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc, 283 tb->flags, tb->cflags, lookup_symbol(pc)); 284 285 #if defined(DEBUG_DISAS) 286 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) { 287 FILE *logfile = qemu_log_trylock(); 288 if (logfile) { 289 int flags = 0; 290 291 if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) { 292 flags |= CPU_DUMP_FPU; 293 } 294 #if defined(TARGET_I386) 295 flags |= CPU_DUMP_CCOP; 296 #endif 297 cpu_dump_state(cpu, logfile, flags); 298 qemu_log_unlock(logfile); 299 } 300 } 301 #endif /* DEBUG_DISAS */ 302 } 303 } 304 305 static bool check_for_breakpoints(CPUState *cpu, target_ulong pc, 306 uint32_t *cflags) 307 { 308 CPUBreakpoint *bp; 309 bool match_page = false; 310 311 if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) { 312 return false; 313 } 314 315 /* 316 * Singlestep overrides breakpoints. 317 * This requirement is visible in the record-replay tests, where 318 * we would fail to make forward progress in reverse-continue. 319 * 320 * TODO: gdb singlestep should only override gdb breakpoints, 321 * so that one could (gdb) singlestep into the guest kernel's 322 * architectural breakpoint handler. 323 */ 324 if (cpu->singlestep_enabled) { 325 return false; 326 } 327 328 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { 329 /* 330 * If we have an exact pc match, trigger the breakpoint. 331 * Otherwise, note matches within the page. 332 */ 333 if (pc == bp->pc) { 334 bool match_bp = false; 335 336 if (bp->flags & BP_GDB) { 337 match_bp = true; 338 } else if (bp->flags & BP_CPU) { 339 #ifdef CONFIG_USER_ONLY 340 g_assert_not_reached(); 341 #else 342 CPUClass *cc = CPU_GET_CLASS(cpu); 343 assert(cc->tcg_ops->debug_check_breakpoint); 344 match_bp = cc->tcg_ops->debug_check_breakpoint(cpu); 345 #endif 346 } 347 348 if (match_bp) { 349 cpu->exception_index = EXCP_DEBUG; 350 return true; 351 } 352 } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) { 353 match_page = true; 354 } 355 } 356 357 /* 358 * Within the same page as a breakpoint, single-step, 359 * returning to helper_lookup_tb_ptr after each insn looking 360 * for the actual breakpoint. 361 * 362 * TODO: Perhaps better to record all of the TBs associated 363 * with a given virtual page that contains a breakpoint, and 364 * then invalidate them when a new overlapping breakpoint is 365 * set on the page. Non-overlapping TBs would not be 366 * invalidated, nor would any TB need to be invalidated as 367 * breakpoints are removed. 368 */ 369 if (match_page) { 370 *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1; 371 } 372 return false; 373 } 374 375 /** 376 * helper_lookup_tb_ptr: quick check for next tb 377 * @env: current cpu state 378 * 379 * Look for an existing TB matching the current cpu state. 380 * If found, return the code pointer. If not found, return 381 * the tcg epilogue so that we return into cpu_tb_exec. 382 */ 383 const void *HELPER(lookup_tb_ptr)(CPUArchState *env) 384 { 385 CPUState *cpu = env_cpu(env); 386 TranslationBlock *tb; 387 target_ulong cs_base, pc; 388 uint32_t flags, cflags; 389 390 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); 391 392 cflags = curr_cflags(cpu); 393 if (check_for_breakpoints(cpu, pc, &cflags)) { 394 cpu_loop_exit(cpu); 395 } 396 397 tb = tb_lookup(cpu, pc, cs_base, flags, cflags); 398 if (tb == NULL) { 399 return tcg_code_gen_epilogue; 400 } 401 402 log_cpu_exec(pc, cpu, tb); 403 404 return tb->tc.ptr; 405 } 406 407 /* Execute a TB, and fix up the CPU state afterwards if necessary */ 408 /* 409 * Disable CFI checks. 410 * TCG creates binary blobs at runtime, with the transformed code. 411 * A TB is a blob of binary code, created at runtime and called with an 412 * indirect function call. Since such function did not exist at compile time, 413 * the CFI runtime has no way to verify its signature and would fail. 414 * TCG is not considered a security-sensitive part of QEMU so this does not 415 * affect the impact of CFI in environment with high security requirements 416 */ 417 static inline TranslationBlock * QEMU_DISABLE_CFI 418 cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) 419 { 420 CPUArchState *env = cpu->env_ptr; 421 uintptr_t ret; 422 TranslationBlock *last_tb; 423 const void *tb_ptr = itb->tc.ptr; 424 425 log_cpu_exec(itb->pc, cpu, itb); 426 427 qemu_thread_jit_execute(); 428 ret = tcg_qemu_tb_exec(env, tb_ptr); 429 cpu->can_do_io = 1; 430 /* 431 * TODO: Delay swapping back to the read-write region of the TB 432 * until we actually need to modify the TB. The read-only copy, 433 * coming from the rx region, shares the same host TLB entry as 434 * the code that executed the exit_tb opcode that arrived here. 435 * If we insist on touching both the RX and the RW pages, we 436 * double the host TLB pressure. 437 */ 438 last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK)); 439 *tb_exit = ret & TB_EXIT_MASK; 440 441 trace_exec_tb_exit(last_tb, *tb_exit); 442 443 if (*tb_exit > TB_EXIT_IDX1) { 444 /* We didn't start executing this TB (eg because the instruction 445 * counter hit zero); we must restore the guest PC to the address 446 * of the start of the TB. 447 */ 448 CPUClass *cc = CPU_GET_CLASS(cpu); 449 qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc, 450 "Stopped execution of TB chain before %p [" 451 TARGET_FMT_lx "] %s\n", 452 last_tb->tc.ptr, last_tb->pc, 453 lookup_symbol(last_tb->pc)); 454 if (cc->tcg_ops->synchronize_from_tb) { 455 cc->tcg_ops->synchronize_from_tb(cpu, last_tb); 456 } else { 457 assert(cc->set_pc); 458 cc->set_pc(cpu, last_tb->pc); 459 } 460 } 461 462 /* 463 * If gdb single-step, and we haven't raised another exception, 464 * raise a debug exception. Single-step with another exception 465 * is handled in cpu_handle_exception. 466 */ 467 if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) { 468 cpu->exception_index = EXCP_DEBUG; 469 cpu_loop_exit(cpu); 470 } 471 472 return last_tb; 473 } 474 475 476 static void cpu_exec_enter(CPUState *cpu) 477 { 478 CPUClass *cc = CPU_GET_CLASS(cpu); 479 480 if (cc->tcg_ops->cpu_exec_enter) { 481 cc->tcg_ops->cpu_exec_enter(cpu); 482 } 483 } 484 485 static void cpu_exec_exit(CPUState *cpu) 486 { 487 CPUClass *cc = CPU_GET_CLASS(cpu); 488 489 if (cc->tcg_ops->cpu_exec_exit) { 490 cc->tcg_ops->cpu_exec_exit(cpu); 491 } 492 } 493 494 void cpu_exec_step_atomic(CPUState *cpu) 495 { 496 CPUArchState *env = cpu->env_ptr; 497 TranslationBlock *tb; 498 target_ulong cs_base, pc; 499 uint32_t flags, cflags; 500 int tb_exit; 501 502 if (sigsetjmp(cpu->jmp_env, 0) == 0) { 503 start_exclusive(); 504 g_assert(cpu == current_cpu); 505 g_assert(!cpu->running); 506 cpu->running = true; 507 508 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags); 509 510 cflags = curr_cflags(cpu); 511 /* Execute in a serial context. */ 512 cflags &= ~CF_PARALLEL; 513 /* After 1 insn, return and release the exclusive lock. */ 514 cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1; 515 /* 516 * No need to check_for_breakpoints here. 517 * We only arrive in cpu_exec_step_atomic after beginning execution 518 * of an insn that includes an atomic operation we can't handle. 519 * Any breakpoint for this insn will have been recognized earlier. 520 */ 521 522 tb = tb_lookup(cpu, pc, cs_base, flags, cflags); 523 if (tb == NULL) { 524 mmap_lock(); 525 tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); 526 mmap_unlock(); 527 } 528 529 cpu_exec_enter(cpu); 530 /* execute the generated code */ 531 trace_exec_tb(tb, pc); 532 cpu_tb_exec(cpu, tb, &tb_exit); 533 cpu_exec_exit(cpu); 534 } else { 535 #ifndef CONFIG_SOFTMMU 536 clear_helper_retaddr(); 537 if (have_mmap_lock()) { 538 mmap_unlock(); 539 } 540 #endif 541 if (qemu_mutex_iothread_locked()) { 542 qemu_mutex_unlock_iothread(); 543 } 544 assert_no_pages_locked(); 545 qemu_plugin_disable_mem_helpers(cpu); 546 } 547 548 /* 549 * As we start the exclusive region before codegen we must still 550 * be in the region if we longjump out of either the codegen or 551 * the execution. 552 */ 553 g_assert(cpu_in_exclusive_context(cpu)); 554 cpu->running = false; 555 end_exclusive(); 556 } 557 558 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr) 559 { 560 if (TCG_TARGET_HAS_direct_jump) { 561 uintptr_t offset = tb->jmp_target_arg[n]; 562 uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr; 563 uintptr_t jmp_rx = tc_ptr + offset; 564 uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff; 565 tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr); 566 } else { 567 tb->jmp_target_arg[n] = addr; 568 } 569 } 570 571 static inline void tb_add_jump(TranslationBlock *tb, int n, 572 TranslationBlock *tb_next) 573 { 574 uintptr_t old; 575 576 qemu_thread_jit_write(); 577 assert(n < ARRAY_SIZE(tb->jmp_list_next)); 578 qemu_spin_lock(&tb_next->jmp_lock); 579 580 /* make sure the destination TB is valid */ 581 if (tb_next->cflags & CF_INVALID) { 582 goto out_unlock_next; 583 } 584 /* Atomically claim the jump destination slot only if it was NULL */ 585 old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, 586 (uintptr_t)tb_next); 587 if (old) { 588 goto out_unlock_next; 589 } 590 591 /* patch the native jump address */ 592 tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr); 593 594 /* add in TB jmp list */ 595 tb->jmp_list_next[n] = tb_next->jmp_list_head; 596 tb_next->jmp_list_head = (uintptr_t)tb | n; 597 598 qemu_spin_unlock(&tb_next->jmp_lock); 599 600 qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc, 601 "Linking TBs %p [" TARGET_FMT_lx 602 "] index %d -> %p [" TARGET_FMT_lx "]\n", 603 tb->tc.ptr, tb->pc, n, 604 tb_next->tc.ptr, tb_next->pc); 605 return; 606 607 out_unlock_next: 608 qemu_spin_unlock(&tb_next->jmp_lock); 609 return; 610 } 611 612 static inline bool cpu_handle_halt(CPUState *cpu) 613 { 614 #ifndef CONFIG_USER_ONLY 615 if (cpu->halted) { 616 #if defined(TARGET_I386) 617 if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { 618 X86CPU *x86_cpu = X86_CPU(cpu); 619 qemu_mutex_lock_iothread(); 620 apic_poll_irq(x86_cpu->apic_state); 621 cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); 622 qemu_mutex_unlock_iothread(); 623 } 624 #endif /* TARGET_I386 */ 625 if (!cpu_has_work(cpu)) { 626 return true; 627 } 628 629 cpu->halted = 0; 630 } 631 #endif /* !CONFIG_USER_ONLY */ 632 633 return false; 634 } 635 636 static inline void cpu_handle_debug_exception(CPUState *cpu) 637 { 638 CPUClass *cc = CPU_GET_CLASS(cpu); 639 CPUWatchpoint *wp; 640 641 if (!cpu->watchpoint_hit) { 642 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { 643 wp->flags &= ~BP_WATCHPOINT_HIT; 644 } 645 } 646 647 if (cc->tcg_ops->debug_excp_handler) { 648 cc->tcg_ops->debug_excp_handler(cpu); 649 } 650 } 651 652 static inline bool cpu_handle_exception(CPUState *cpu, int *ret) 653 { 654 if (cpu->exception_index < 0) { 655 #ifndef CONFIG_USER_ONLY 656 if (replay_has_exception() 657 && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) { 658 /* Execute just one insn to trigger exception pending in the log */ 659 cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) 660 | CF_NOIRQ | 1; 661 } 662 #endif 663 return false; 664 } 665 if (cpu->exception_index >= EXCP_INTERRUPT) { 666 /* exit request from the cpu execution loop */ 667 *ret = cpu->exception_index; 668 if (*ret == EXCP_DEBUG) { 669 cpu_handle_debug_exception(cpu); 670 } 671 cpu->exception_index = -1; 672 return true; 673 } else { 674 #if defined(CONFIG_USER_ONLY) 675 /* if user mode only, we simulate a fake exception 676 which will be handled outside the cpu execution 677 loop */ 678 #if defined(TARGET_I386) 679 CPUClass *cc = CPU_GET_CLASS(cpu); 680 cc->tcg_ops->fake_user_interrupt(cpu); 681 #endif /* TARGET_I386 */ 682 *ret = cpu->exception_index; 683 cpu->exception_index = -1; 684 return true; 685 #else 686 if (replay_exception()) { 687 CPUClass *cc = CPU_GET_CLASS(cpu); 688 qemu_mutex_lock_iothread(); 689 cc->tcg_ops->do_interrupt(cpu); 690 qemu_mutex_unlock_iothread(); 691 cpu->exception_index = -1; 692 693 if (unlikely(cpu->singlestep_enabled)) { 694 /* 695 * After processing the exception, ensure an EXCP_DEBUG is 696 * raised when single-stepping so that GDB doesn't miss the 697 * next instruction. 698 */ 699 *ret = EXCP_DEBUG; 700 cpu_handle_debug_exception(cpu); 701 return true; 702 } 703 } else if (!replay_has_interrupt()) { 704 /* give a chance to iothread in replay mode */ 705 *ret = EXCP_INTERRUPT; 706 return true; 707 } 708 #endif 709 } 710 711 return false; 712 } 713 714 #ifndef CONFIG_USER_ONLY 715 /* 716 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a 717 * "real" interrupt event later. It does not need to be recorded for 718 * replay purposes. 719 */ 720 static inline bool need_replay_interrupt(int interrupt_request) 721 { 722 #if defined(TARGET_I386) 723 return !(interrupt_request & CPU_INTERRUPT_POLL); 724 #else 725 return true; 726 #endif 727 } 728 #endif /* !CONFIG_USER_ONLY */ 729 730 static inline bool cpu_handle_interrupt(CPUState *cpu, 731 TranslationBlock **last_tb) 732 { 733 /* 734 * If we have requested custom cflags with CF_NOIRQ we should 735 * skip checking here. Any pending interrupts will get picked up 736 * by the next TB we execute under normal cflags. 737 */ 738 if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) { 739 return false; 740 } 741 742 /* Clear the interrupt flag now since we're processing 743 * cpu->interrupt_request and cpu->exit_request. 744 * Ensure zeroing happens before reading cpu->exit_request or 745 * cpu->interrupt_request (see also smp_wmb in cpu_exit()) 746 */ 747 qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0); 748 749 if (unlikely(qatomic_read(&cpu->interrupt_request))) { 750 int interrupt_request; 751 qemu_mutex_lock_iothread(); 752 interrupt_request = cpu->interrupt_request; 753 if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { 754 /* Mask out external interrupts for this step. */ 755 interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; 756 } 757 if (interrupt_request & CPU_INTERRUPT_DEBUG) { 758 cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; 759 cpu->exception_index = EXCP_DEBUG; 760 qemu_mutex_unlock_iothread(); 761 return true; 762 } 763 #if !defined(CONFIG_USER_ONLY) 764 if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) { 765 /* Do nothing */ 766 } else if (interrupt_request & CPU_INTERRUPT_HALT) { 767 replay_interrupt(); 768 cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; 769 cpu->halted = 1; 770 cpu->exception_index = EXCP_HLT; 771 qemu_mutex_unlock_iothread(); 772 return true; 773 } 774 #if defined(TARGET_I386) 775 else if (interrupt_request & CPU_INTERRUPT_INIT) { 776 X86CPU *x86_cpu = X86_CPU(cpu); 777 CPUArchState *env = &x86_cpu->env; 778 replay_interrupt(); 779 cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0); 780 do_cpu_init(x86_cpu); 781 cpu->exception_index = EXCP_HALTED; 782 qemu_mutex_unlock_iothread(); 783 return true; 784 } 785 #else 786 else if (interrupt_request & CPU_INTERRUPT_RESET) { 787 replay_interrupt(); 788 cpu_reset(cpu); 789 qemu_mutex_unlock_iothread(); 790 return true; 791 } 792 #endif /* !TARGET_I386 */ 793 /* The target hook has 3 exit conditions: 794 False when the interrupt isn't processed, 795 True when it is, and we should restart on a new TB, 796 and via longjmp via cpu_loop_exit. */ 797 else { 798 CPUClass *cc = CPU_GET_CLASS(cpu); 799 800 if (cc->tcg_ops->cpu_exec_interrupt && 801 cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) { 802 if (need_replay_interrupt(interrupt_request)) { 803 replay_interrupt(); 804 } 805 /* 806 * After processing the interrupt, ensure an EXCP_DEBUG is 807 * raised when single-stepping so that GDB doesn't miss the 808 * next instruction. 809 */ 810 if (unlikely(cpu->singlestep_enabled)) { 811 cpu->exception_index = EXCP_DEBUG; 812 qemu_mutex_unlock_iothread(); 813 return true; 814 } 815 cpu->exception_index = -1; 816 *last_tb = NULL; 817 } 818 /* The target hook may have updated the 'cpu->interrupt_request'; 819 * reload the 'interrupt_request' value */ 820 interrupt_request = cpu->interrupt_request; 821 } 822 #endif /* !CONFIG_USER_ONLY */ 823 if (interrupt_request & CPU_INTERRUPT_EXITTB) { 824 cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; 825 /* ensure that no TB jump will be modified as 826 the program flow was changed */ 827 *last_tb = NULL; 828 } 829 830 /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */ 831 qemu_mutex_unlock_iothread(); 832 } 833 834 /* Finally, check if we need to exit to the main loop. */ 835 if (unlikely(qatomic_read(&cpu->exit_request)) 836 || (icount_enabled() 837 && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT) 838 && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) { 839 qatomic_set(&cpu->exit_request, 0); 840 if (cpu->exception_index == -1) { 841 cpu->exception_index = EXCP_INTERRUPT; 842 } 843 return true; 844 } 845 846 return false; 847 } 848 849 static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, 850 TranslationBlock **last_tb, int *tb_exit) 851 { 852 int32_t insns_left; 853 854 trace_exec_tb(tb, tb->pc); 855 tb = cpu_tb_exec(cpu, tb, tb_exit); 856 if (*tb_exit != TB_EXIT_REQUESTED) { 857 *last_tb = tb; 858 return; 859 } 860 861 *last_tb = NULL; 862 insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32); 863 if (insns_left < 0) { 864 /* Something asked us to stop executing chained TBs; just 865 * continue round the main loop. Whatever requested the exit 866 * will also have set something else (eg exit_request or 867 * interrupt_request) which will be handled by 868 * cpu_handle_interrupt. cpu_handle_interrupt will also 869 * clear cpu->icount_decr.u16.high. 870 */ 871 return; 872 } 873 874 /* Instruction counter expired. */ 875 assert(icount_enabled()); 876 #ifndef CONFIG_USER_ONLY 877 /* Ensure global icount has gone forward */ 878 icount_update(cpu); 879 /* Refill decrementer and continue execution. */ 880 insns_left = MIN(0xffff, cpu->icount_budget); 881 cpu_neg(cpu)->icount_decr.u16.low = insns_left; 882 cpu->icount_extra = cpu->icount_budget - insns_left; 883 884 /* 885 * If the next tb has more instructions than we have left to 886 * execute we need to ensure we find/generate a TB with exactly 887 * insns_left instructions in it. 888 */ 889 if (insns_left > 0 && insns_left < tb->icount) { 890 assert(insns_left <= CF_COUNT_MASK); 891 assert(cpu->icount_extra == 0); 892 cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left; 893 } 894 #endif 895 } 896 897 /* main execution loop */ 898 899 int cpu_exec(CPUState *cpu) 900 { 901 int ret; 902 SyncClocks sc = { 0 }; 903 904 /* replay_interrupt may need current_cpu */ 905 current_cpu = cpu; 906 907 if (cpu_handle_halt(cpu)) { 908 return EXCP_HALTED; 909 } 910 911 rcu_read_lock(); 912 913 cpu_exec_enter(cpu); 914 915 /* Calculate difference between guest clock and host clock. 916 * This delay includes the delay of the last cycle, so 917 * what we have to do is sleep until it is 0. As for the 918 * advance/delay we gain here, we try to fix it next time. 919 */ 920 init_delay_params(&sc, cpu); 921 922 /* prepare setjmp context for exception handling */ 923 if (sigsetjmp(cpu->jmp_env, 0) != 0) { 924 #if defined(__clang__) 925 /* 926 * Some compilers wrongly smash all local variables after 927 * siglongjmp (the spec requires that only non-volatile locals 928 * which are changed between the sigsetjmp and siglongjmp are 929 * permitted to be trashed). There were bug reports for gcc 930 * 4.5.0 and clang. The bug is fixed in all versions of gcc 931 * that we support, but is still unfixed in clang: 932 * https://bugs.llvm.org/show_bug.cgi?id=21183 933 * 934 * Reload an essential local variable here for those compilers. 935 * Newer versions of gcc would complain about this code (-Wclobbered), 936 * so we only perform the workaround for clang. 937 */ 938 cpu = current_cpu; 939 #else 940 /* Non-buggy compilers preserve this; assert the correct value. */ 941 g_assert(cpu == current_cpu); 942 #endif 943 944 #ifndef CONFIG_SOFTMMU 945 clear_helper_retaddr(); 946 if (have_mmap_lock()) { 947 mmap_unlock(); 948 } 949 #endif 950 if (qemu_mutex_iothread_locked()) { 951 qemu_mutex_unlock_iothread(); 952 } 953 qemu_plugin_disable_mem_helpers(cpu); 954 955 assert_no_pages_locked(); 956 } 957 958 /* if an exception is pending, we execute it here */ 959 while (!cpu_handle_exception(cpu, &ret)) { 960 TranslationBlock *last_tb = NULL; 961 int tb_exit = 0; 962 963 while (!cpu_handle_interrupt(cpu, &last_tb)) { 964 TranslationBlock *tb; 965 target_ulong cs_base, pc; 966 uint32_t flags, cflags; 967 968 cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags); 969 970 /* 971 * When requested, use an exact setting for cflags for the next 972 * execution. This is used for icount, precise smc, and stop- 973 * after-access watchpoints. Since this request should never 974 * have CF_INVALID set, -1 is a convenient invalid value that 975 * does not require tcg headers for cpu_common_reset. 976 */ 977 cflags = cpu->cflags_next_tb; 978 if (cflags == -1) { 979 cflags = curr_cflags(cpu); 980 } else { 981 cpu->cflags_next_tb = -1; 982 } 983 984 if (check_for_breakpoints(cpu, pc, &cflags)) { 985 break; 986 } 987 988 tb = tb_lookup(cpu, pc, cs_base, flags, cflags); 989 if (tb == NULL) { 990 mmap_lock(); 991 tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); 992 mmap_unlock(); 993 /* 994 * We add the TB in the virtual pc hash table 995 * for the fast lookup 996 */ 997 qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); 998 } 999 1000 #ifndef CONFIG_USER_ONLY 1001 /* 1002 * We don't take care of direct jumps when address mapping 1003 * changes in system emulation. So it's not safe to make a 1004 * direct jump to a TB spanning two pages because the mapping 1005 * for the second page can change. 1006 */ 1007 if (tb->page_addr[1] != -1) { 1008 last_tb = NULL; 1009 } 1010 #endif 1011 /* See if we can patch the calling TB. */ 1012 if (last_tb) { 1013 tb_add_jump(last_tb, tb_exit, tb); 1014 } 1015 1016 cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit); 1017 1018 /* Try to align the host and virtual clocks 1019 if the guest is in advance */ 1020 align_clocks(&sc, cpu); 1021 } 1022 } 1023 1024 cpu_exec_exit(cpu); 1025 rcu_read_unlock(); 1026 1027 return ret; 1028 } 1029 1030 void tcg_exec_realizefn(CPUState *cpu, Error **errp) 1031 { 1032 static bool tcg_target_initialized; 1033 CPUClass *cc = CPU_GET_CLASS(cpu); 1034 1035 if (!tcg_target_initialized) { 1036 cc->tcg_ops->initialize(); 1037 tcg_target_initialized = true; 1038 } 1039 tlb_init(cpu); 1040 qemu_plugin_vcpu_init_hook(cpu); 1041 1042 #ifndef CONFIG_USER_ONLY 1043 tcg_iommu_init_notifier_list(cpu); 1044 #endif /* !CONFIG_USER_ONLY */ 1045 } 1046 1047 /* undo the initializations in reverse order */ 1048 void tcg_exec_unrealizefn(CPUState *cpu) 1049 { 1050 #ifndef CONFIG_USER_ONLY 1051 tcg_iommu_free_notifier_list(cpu); 1052 #endif /* !CONFIG_USER_ONLY */ 1053 1054 qemu_plugin_vcpu_exit_hook(cpu); 1055 tlb_destroy(cpu); 1056 } 1057 1058 #ifndef CONFIG_USER_ONLY 1059 1060 static void dump_drift_info(GString *buf) 1061 { 1062 if (!icount_enabled()) { 1063 return; 1064 } 1065 1066 g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n", 1067 (cpu_get_clock() - icount_get()) / SCALE_MS); 1068 if (icount_align_option) { 1069 g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n", 1070 -max_delay / SCALE_MS); 1071 g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n", 1072 max_advance / SCALE_MS); 1073 } else { 1074 g_string_append_printf(buf, "Max guest delay NA\n"); 1075 g_string_append_printf(buf, "Max guest advance NA\n"); 1076 } 1077 } 1078 1079 HumanReadableText *qmp_x_query_jit(Error **errp) 1080 { 1081 g_autoptr(GString) buf = g_string_new(""); 1082 1083 if (!tcg_enabled()) { 1084 error_setg(errp, "JIT information is only available with accel=tcg"); 1085 return NULL; 1086 } 1087 1088 dump_exec_info(buf); 1089 dump_drift_info(buf); 1090 1091 return human_readable_text_from_str(buf); 1092 } 1093 1094 HumanReadableText *qmp_x_query_opcount(Error **errp) 1095 { 1096 g_autoptr(GString) buf = g_string_new(""); 1097 1098 if (!tcg_enabled()) { 1099 error_setg(errp, "Opcode count information is only available with accel=tcg"); 1100 return NULL; 1101 } 1102 1103 tcg_dump_op_count(buf); 1104 1105 return human_readable_text_from_str(buf); 1106 } 1107 1108 #ifdef CONFIG_PROFILER 1109 1110 int64_t dev_time; 1111 1112 HumanReadableText *qmp_x_query_profile(Error **errp) 1113 { 1114 g_autoptr(GString) buf = g_string_new(""); 1115 static int64_t last_cpu_exec_time; 1116 int64_t cpu_exec_time; 1117 int64_t delta; 1118 1119 cpu_exec_time = tcg_cpu_exec_time(); 1120 delta = cpu_exec_time - last_cpu_exec_time; 1121 1122 g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n", 1123 dev_time, dev_time / (double)NANOSECONDS_PER_SECOND); 1124 g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n", 1125 delta, delta / (double)NANOSECONDS_PER_SECOND); 1126 last_cpu_exec_time = cpu_exec_time; 1127 dev_time = 0; 1128 1129 return human_readable_text_from_str(buf); 1130 } 1131 #else 1132 HumanReadableText *qmp_x_query_profile(Error **errp) 1133 { 1134 error_setg(errp, "Internal profiler not compiled"); 1135 return NULL; 1136 } 1137 #endif 1138 1139 #endif /* !CONFIG_USER_ONLY */ 1140