/*
 * Host code generation
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"

#define NO_CPU_IO_DEFS
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/ram_addr.h"
#endif

#include "exec/cputlb.h"
#include "exec/translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#ifdef DEBUG_TB_INVALIDATE
#define DEBUG_TB_INVALIDATE_GATE 1
#else
#define DEBUG_TB_INVALIDATE_GATE 0
#endif

#ifdef DEBUG_TB_FLUSH
#define DEBUG_TB_FLUSH_GATE 1
#else
#define DEBUG_TB_FLUSH_GATE 0
#endif

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#ifdef DEBUG_TB_CHECK
#define DEBUG_TB_CHECK_GATE 1
#else
#define DEBUG_TB_CHECK_GATE 0
#endif

/* Access to the various translation structures needs to be serialised
 * via locks for consistency.
 * In user-mode emulation, access to the memory-related structures is
 * protected with mmap_lock.
 * In !user-mode we use per-page locks.
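 *
 * A rough sketch of the convention, using helpers defined later in this
 * file (illustrative only, not code that appears elsewhere):
 *
 *     // user-mode: the mmap_lock covers the page tables and TB lists
 *     mmap_lock();
 *     ... walk or modify the page tables / per-page TB lists ...
 *     mmap_unlock();
 *
 *     // !user-mode: take the spinlock of each PageDesc that is touched
 *     PageDesc *pd = page_find(page_index);   // page_index: illustrative
 *     page_lock(pd);
 *     ... walk or modify pd->first_tb ...
 *     page_unlock(pd);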
99 */ 100 #ifdef CONFIG_SOFTMMU 101 #define assert_memory_lock() 102 #else 103 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock()) 104 #endif 105 106 #define SMC_BITMAP_USE_THRESHOLD 10 107 108 typedef struct PageDesc { 109 /* list of TBs intersecting this ram page */ 110 uintptr_t first_tb; 111 #ifdef CONFIG_SOFTMMU 112 /* in order to optimize self modifying code, we count the number 113 of lookups we do to a given page to use a bitmap */ 114 unsigned long *code_bitmap; 115 unsigned int code_write_count; 116 #else 117 unsigned long flags; 118 void *target_data; 119 #endif 120 #ifndef CONFIG_USER_ONLY 121 QemuSpin lock; 122 #endif 123 } PageDesc; 124 125 /** 126 * struct page_entry - page descriptor entry 127 * @pd: pointer to the &struct PageDesc of the page this entry represents 128 * @index: page index of the page 129 * @locked: whether the page is locked 130 * 131 * This struct helps us keep track of the locked state of a page, without 132 * bloating &struct PageDesc. 133 * 134 * A page lock protects accesses to all fields of &struct PageDesc. 135 * 136 * See also: &struct page_collection. 137 */ 138 struct page_entry { 139 PageDesc *pd; 140 tb_page_addr_t index; 141 bool locked; 142 }; 143 144 /** 145 * struct page_collection - tracks a set of pages (i.e. &struct page_entry's) 146 * @tree: Binary search tree (BST) of the pages, with key == page index 147 * @max: Pointer to the page in @tree with the highest page index 148 * 149 * To avoid deadlock we lock pages in ascending order of page index. 150 * When operating on a set of pages, we need to keep track of them so that 151 * we can lock them in order and also unlock them later. For this we collect 152 * pages (i.e. &struct page_entry's) in a binary search @tree. Given that the 153 * @tree implementation we use does not provide an O(1) operation to obtain the 154 * highest-ranked element, we use @max to keep track of the inserted page 155 * with the highest index. This is valuable because if a page is not in 156 * the tree and its index is higher than @max's, then we can lock it 157 * without breaking the locking order rule. 158 * 159 * Note on naming: 'struct page_set' would be shorter, but we already have a few 160 * page_set_*() helpers, so page_collection is used instead to avoid confusion. 161 * 162 * See also: page_collection_lock(). 163 */ 164 struct page_collection { 165 GTree *tree; 166 struct page_entry *max; 167 }; 168 169 /* list iterators for lists of tagged pointers in TranslationBlock */ 170 #define TB_FOR_EACH_TAGGED(head, tb, n, field) \ 171 for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \ 172 tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \ 173 tb = (TranslationBlock *)((uintptr_t)tb & ~1)) 174 175 #define PAGE_FOR_EACH_TB(pagedesc, tb, n) \ 176 TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next) 177 178 #define TB_FOR_EACH_JMP(head_tb, tb, n) \ 179 TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next) 180 181 /* 182 * In system mode we want L1_MAP to be based on ram offsets, 183 * while in user mode we want it to be based on virtual addresses. 184 * 185 * TODO: For user mode, see the caveat re host vs guest virtual 186 * address spaces near GUEST_ADDR_MAX. 
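 *
 * As a worked example of how page_table_config_init() below splits these
 * bits (the concrete numbers are illustrative): with HOST_LONG_BITS = 64,
 * a 36-bit physical address space and TARGET_PAGE_BITS = 12,
 * L1_MAP_ADDR_SPACE_BITS is 36, leaving 24 bits of page index to map.
 * 24 % V_L2_BITS = 4, which is not below V_L1_MIN_BITS, so v_l1_bits = 4,
 * v_l1_size = 16, v_l1_shift = 20 and v_l2_levels = 1: a 16-entry L1
 * table, one 1024-entry intermediate level, and a bottom level of 1024
 * PageDescs indexed by the low 10 bits of the page index.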
187 */ 188 #if !defined(CONFIG_USER_ONLY) 189 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS 190 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS 191 #else 192 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS 193 #endif 194 #else 195 # define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS) 196 #endif 197 198 /* Size of the L2 (and L3, etc) page tables. */ 199 #define V_L2_BITS 10 200 #define V_L2_SIZE (1 << V_L2_BITS) 201 202 /* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */ 203 QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS > 204 sizeof_field(TranslationBlock, trace_vcpu_dstate) 205 * BITS_PER_BYTE); 206 207 /* 208 * L1 Mapping properties 209 */ 210 static int v_l1_size; 211 static int v_l1_shift; 212 static int v_l2_levels; 213 214 /* The bottom level has pointers to PageDesc, and is indexed by 215 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size. 216 */ 217 #define V_L1_MIN_BITS 4 218 #define V_L1_MAX_BITS (V_L2_BITS + 3) 219 #define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS) 220 221 static void *l1_map[V_L1_MAX_SIZE]; 222 223 /* code generation context */ 224 TCGContext tcg_init_ctx; 225 __thread TCGContext *tcg_ctx; 226 TBContext tb_ctx; 227 228 static void page_table_config_init(void) 229 { 230 uint32_t v_l1_bits; 231 232 assert(TARGET_PAGE_BITS); 233 /* The bits remaining after N lower levels of page tables. */ 234 v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS; 235 if (v_l1_bits < V_L1_MIN_BITS) { 236 v_l1_bits += V_L2_BITS; 237 } 238 239 v_l1_size = 1 << v_l1_bits; 240 v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits; 241 v_l2_levels = v_l1_shift / V_L2_BITS - 1; 242 243 assert(v_l1_bits <= V_L1_MAX_BITS); 244 assert(v_l1_shift % V_L2_BITS == 0); 245 assert(v_l2_levels >= 0); 246 } 247 248 static void cpu_gen_init(void) 249 { 250 tcg_context_init(&tcg_init_ctx); 251 } 252 253 /* Encode VAL as a signed leb128 sequence at P. 254 Return P incremented past the encoded value. */ 255 static uint8_t *encode_sleb128(uint8_t *p, target_long val) 256 { 257 int more, byte; 258 259 do { 260 byte = val & 0x7f; 261 val >>= 7; 262 more = !((val == 0 && (byte & 0x40) == 0) 263 || (val == -1 && (byte & 0x40) != 0)); 264 if (more) { 265 byte |= 0x80; 266 } 267 *p++ = byte; 268 } while (more); 269 270 return p; 271 } 272 273 /* Decode a signed leb128 sequence at *PP; increment *PP past the 274 decoded value. Return the decoded value. */ 275 static target_long decode_sleb128(const uint8_t **pp) 276 { 277 const uint8_t *p = *pp; 278 target_long val = 0; 279 int byte, shift = 0; 280 281 do { 282 byte = *p++; 283 val |= (target_ulong)(byte & 0x7f) << shift; 284 shift += 7; 285 } while (byte & 0x80); 286 if (shift < TARGET_LONG_BITS && (byte & 0x40)) { 287 val |= -(target_ulong)1 << shift; 288 } 289 290 *pp = p; 291 return val; 292 } 293 294 /* Encode the data collected about the instructions while compiling TB. 295 Place the data at BLOCK, and return the number of bytes consumed. 296 297 The logical table consists of TARGET_INSN_START_WORDS target_ulong's, 298 which come from the target's insn_start data, followed by a uintptr_t 299 which comes from the host pc of the end of the code implementing the insn. 300 301 Each line of the table is encoded as sleb128 deltas from the previous 302 line. The seed for the first line is { tb->pc, 0..., tb->tc.ptr }. 303 That is, the first column is seeded with the guest pc, the last column 304 with the host pc, and the middle columns with zeros. 
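
   A small worked example (illustrative values, for a target with
   TARGET_INSN_START_WORDS == 1): a TB at guest pc 0x1000 containing two
   insns at 0x1000 and 0x1004, whose host code ends at offsets 0x20 and
   0x38 from tb->tc.ptr.  Row 0 stores the deltas from the seed,
   { 0x1000 - 0x1000, 0x20 - 0 } = { 0, 0x20 }; row 1 stores the deltas
   from row 0, { 0x1004 - 0x1000, 0x38 - 0x20 } = { 4, 0x18 }.  Each value
   is emitted with encode_sleb128(), so these small deltas take one byte
   apiece, and cpu_restore_state_from_tb() walks the same rows with
   decode_sleb128() to map a host pc back to guest insn state.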
 */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx->code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx->gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}

/* The cpu state corresponding to 'searched_pc' is restored.
 * When reset_icount is true, current TB will be interrupted and
 * icount should be recalculated.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc, bool reset_icount)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
    CPUArchState *env = cpu->env_ptr;
    const uint8_t *p = tb->tc.ptr + tb->tc.size;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &tcg_ctx->prof;
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (reset_icount && (tb_cflags(tb) & CF_USE_ICOUNT)) {
        assert(icount_enabled());
        /* Reset the cycle counter to the start of the block
           and shift it to the number of actually executed instructions */
        cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
    }
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    qatomic_set(&prof->restore_time,
                prof->restore_time + profile_getclock() - ti);
    qatomic_set(&prof->restore_count, prof->restore_count + 1);
#endif
    return 0;
}

void tb_destroy(TranslationBlock *tb)
{
    qemu_spin_destroy(&tb->jmp_lock);
}

bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
{
    /*
     * The host_pc has to be in the rx region of the code buffer.
     * If it is not we will not be able to resolve it here.
     * The two cases where host_pc will not be correct are:
     *
     *  - fault during translation (instruction fetch)
     *  - fault from helper (not using GETPC() macro)
     *
     * Either way we need to return early as we can't resolve it here.
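     *
     * For the good case, a sketch of what a helper is expected to do
     * (the helper name and the exception call are hypothetical; only
     * GETPC() and cpu_restore_state() are real):
     *
     *     void helper_example_store(CPUArchState *env, ...)
     *     {
     *         if (access_failed) {
     *             // GETPC() captures the host return address inside the
     *             // generated code, so it falls in the code_gen_buffer
     *             // and tcg_tb_lookup() below can resolve it.
     *             cpu_restore_state(env_cpu(env), GETPC(), true);
     *             // ... then raise the guest exception ...
     *         }
     *     }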
407 */ 408 if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) { 409 TranslationBlock *tb = tcg_tb_lookup(host_pc); 410 if (tb) { 411 cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit); 412 return true; 413 } 414 } 415 return false; 416 } 417 418 static void page_init(void) 419 { 420 page_size_init(); 421 page_table_config_init(); 422 423 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) 424 { 425 #ifdef HAVE_KINFO_GETVMMAP 426 struct kinfo_vmentry *freep; 427 int i, cnt; 428 429 freep = kinfo_getvmmap(getpid(), &cnt); 430 if (freep) { 431 mmap_lock(); 432 for (i = 0; i < cnt; i++) { 433 unsigned long startaddr, endaddr; 434 435 startaddr = freep[i].kve_start; 436 endaddr = freep[i].kve_end; 437 if (h2g_valid(startaddr)) { 438 startaddr = h2g(startaddr) & TARGET_PAGE_MASK; 439 440 if (h2g_valid(endaddr)) { 441 endaddr = h2g(endaddr); 442 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 443 } else { 444 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS 445 endaddr = ~0ul; 446 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 447 #endif 448 } 449 } 450 } 451 free(freep); 452 mmap_unlock(); 453 } 454 #else 455 FILE *f; 456 457 last_brk = (unsigned long)sbrk(0); 458 459 f = fopen("/compat/linux/proc/self/maps", "r"); 460 if (f) { 461 mmap_lock(); 462 463 do { 464 unsigned long startaddr, endaddr; 465 int n; 466 467 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); 468 469 if (n == 2 && h2g_valid(startaddr)) { 470 startaddr = h2g(startaddr) & TARGET_PAGE_MASK; 471 472 if (h2g_valid(endaddr)) { 473 endaddr = h2g(endaddr); 474 } else { 475 endaddr = ~0ul; 476 } 477 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 478 } 479 } while (!feof(f)); 480 481 fclose(f); 482 mmap_unlock(); 483 } 484 #endif 485 } 486 #endif 487 } 488 489 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) 490 { 491 PageDesc *pd; 492 void **lp; 493 int i; 494 495 /* Level 1. Always allocated. */ 496 lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1)); 497 498 /* Level 2..N-1. 
*/ 499 for (i = v_l2_levels; i > 0; i--) { 500 void **p = qatomic_rcu_read(lp); 501 502 if (p == NULL) { 503 void *existing; 504 505 if (!alloc) { 506 return NULL; 507 } 508 p = g_new0(void *, V_L2_SIZE); 509 existing = qatomic_cmpxchg(lp, NULL, p); 510 if (unlikely(existing)) { 511 g_free(p); 512 p = existing; 513 } 514 } 515 516 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); 517 } 518 519 pd = qatomic_rcu_read(lp); 520 if (pd == NULL) { 521 void *existing; 522 523 if (!alloc) { 524 return NULL; 525 } 526 pd = g_new0(PageDesc, V_L2_SIZE); 527 #ifndef CONFIG_USER_ONLY 528 { 529 int i; 530 531 for (i = 0; i < V_L2_SIZE; i++) { 532 qemu_spin_init(&pd[i].lock); 533 } 534 } 535 #endif 536 existing = qatomic_cmpxchg(lp, NULL, pd); 537 if (unlikely(existing)) { 538 #ifndef CONFIG_USER_ONLY 539 { 540 int i; 541 542 for (i = 0; i < V_L2_SIZE; i++) { 543 qemu_spin_destroy(&pd[i].lock); 544 } 545 } 546 #endif 547 g_free(pd); 548 pd = existing; 549 } 550 } 551 552 return pd + (index & (V_L2_SIZE - 1)); 553 } 554 555 static inline PageDesc *page_find(tb_page_addr_t index) 556 { 557 return page_find_alloc(index, 0); 558 } 559 560 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, 561 PageDesc **ret_p2, tb_page_addr_t phys2, int alloc); 562 563 /* In user-mode page locks aren't used; mmap_lock is enough */ 564 #ifdef CONFIG_USER_ONLY 565 566 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock()) 567 568 static inline void page_lock(PageDesc *pd) 569 { } 570 571 static inline void page_unlock(PageDesc *pd) 572 { } 573 574 static inline void page_lock_tb(const TranslationBlock *tb) 575 { } 576 577 static inline void page_unlock_tb(const TranslationBlock *tb) 578 { } 579 580 struct page_collection * 581 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) 582 { 583 return NULL; 584 } 585 586 void page_collection_unlock(struct page_collection *set) 587 { } 588 #else /* !CONFIG_USER_ONLY */ 589 590 #ifdef CONFIG_DEBUG_TCG 591 592 static __thread GHashTable *ht_pages_locked_debug; 593 594 static void ht_pages_locked_debug_init(void) 595 { 596 if (ht_pages_locked_debug) { 597 return; 598 } 599 ht_pages_locked_debug = g_hash_table_new(NULL, NULL); 600 } 601 602 static bool page_is_locked(const PageDesc *pd) 603 { 604 PageDesc *found; 605 606 ht_pages_locked_debug_init(); 607 found = g_hash_table_lookup(ht_pages_locked_debug, pd); 608 return !!found; 609 } 610 611 static void page_lock__debug(PageDesc *pd) 612 { 613 ht_pages_locked_debug_init(); 614 g_assert(!page_is_locked(pd)); 615 g_hash_table_insert(ht_pages_locked_debug, pd, pd); 616 } 617 618 static void page_unlock__debug(const PageDesc *pd) 619 { 620 bool removed; 621 622 ht_pages_locked_debug_init(); 623 g_assert(page_is_locked(pd)); 624 removed = g_hash_table_remove(ht_pages_locked_debug, pd); 625 g_assert(removed); 626 } 627 628 static void 629 do_assert_page_locked(const PageDesc *pd, const char *file, int line) 630 { 631 if (unlikely(!page_is_locked(pd))) { 632 error_report("assert_page_lock: PageDesc %p not locked @ %s:%d", 633 pd, file, line); 634 abort(); 635 } 636 } 637 638 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__) 639 640 void assert_no_pages_locked(void) 641 { 642 ht_pages_locked_debug_init(); 643 g_assert(g_hash_table_size(ht_pages_locked_debug) == 0); 644 } 645 646 #else /* !CONFIG_DEBUG_TCG */ 647 648 #define assert_page_locked(pd) 649 650 static inline void page_lock__debug(const PageDesc *pd) 651 { 652 } 653 654 static inline void page_unlock__debug(const 
PageDesc *pd) 655 { 656 } 657 658 #endif /* CONFIG_DEBUG_TCG */ 659 660 static inline void page_lock(PageDesc *pd) 661 { 662 page_lock__debug(pd); 663 qemu_spin_lock(&pd->lock); 664 } 665 666 static inline void page_unlock(PageDesc *pd) 667 { 668 qemu_spin_unlock(&pd->lock); 669 page_unlock__debug(pd); 670 } 671 672 /* lock the page(s) of a TB in the correct acquisition order */ 673 static inline void page_lock_tb(const TranslationBlock *tb) 674 { 675 page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0); 676 } 677 678 static inline void page_unlock_tb(const TranslationBlock *tb) 679 { 680 PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); 681 682 page_unlock(p1); 683 if (unlikely(tb->page_addr[1] != -1)) { 684 PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); 685 686 if (p2 != p1) { 687 page_unlock(p2); 688 } 689 } 690 } 691 692 static inline struct page_entry * 693 page_entry_new(PageDesc *pd, tb_page_addr_t index) 694 { 695 struct page_entry *pe = g_malloc(sizeof(*pe)); 696 697 pe->index = index; 698 pe->pd = pd; 699 pe->locked = false; 700 return pe; 701 } 702 703 static void page_entry_destroy(gpointer p) 704 { 705 struct page_entry *pe = p; 706 707 g_assert(pe->locked); 708 page_unlock(pe->pd); 709 g_free(pe); 710 } 711 712 /* returns false on success */ 713 static bool page_entry_trylock(struct page_entry *pe) 714 { 715 bool busy; 716 717 busy = qemu_spin_trylock(&pe->pd->lock); 718 if (!busy) { 719 g_assert(!pe->locked); 720 pe->locked = true; 721 page_lock__debug(pe->pd); 722 } 723 return busy; 724 } 725 726 static void do_page_entry_lock(struct page_entry *pe) 727 { 728 page_lock(pe->pd); 729 g_assert(!pe->locked); 730 pe->locked = true; 731 } 732 733 static gboolean page_entry_lock(gpointer key, gpointer value, gpointer data) 734 { 735 struct page_entry *pe = value; 736 737 do_page_entry_lock(pe); 738 return FALSE; 739 } 740 741 static gboolean page_entry_unlock(gpointer key, gpointer value, gpointer data) 742 { 743 struct page_entry *pe = value; 744 745 if (pe->locked) { 746 pe->locked = false; 747 page_unlock(pe->pd); 748 } 749 return FALSE; 750 } 751 752 /* 753 * Trylock a page, and if successful, add the page to a collection. 754 * Returns true ("busy") if the page could not be locked; false otherwise. 755 */ 756 static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr) 757 { 758 tb_page_addr_t index = addr >> TARGET_PAGE_BITS; 759 struct page_entry *pe; 760 PageDesc *pd; 761 762 pe = g_tree_lookup(set->tree, &index); 763 if (pe) { 764 return false; 765 } 766 767 pd = page_find(index); 768 if (pd == NULL) { 769 return false; 770 } 771 772 pe = page_entry_new(pd, index); 773 g_tree_insert(set->tree, &pe->index, pe); 774 775 /* 776 * If this is either (1) the first insertion or (2) a page whose index 777 * is higher than any other so far, just lock the page and move on. 778 */ 779 if (set->max == NULL || pe->index > set->max->index) { 780 set->max = pe; 781 do_page_entry_lock(pe); 782 return false; 783 } 784 /* 785 * Try to acquire out-of-order lock; if busy, return busy so that we acquire 786 * locks in order. 
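     *
     * For example (illustrative indices): if the set already holds pages
     * 5 and 9 (so set->max is 9) and page 7 is now needed, it can only be
     * trylocked.  If that fails, page_collection_lock() drops every lock
     * in the set and retries, reacquiring 5, 7 and 9 in ascending order.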
787 */ 788 return page_entry_trylock(pe); 789 } 790 791 static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata) 792 { 793 tb_page_addr_t a = *(const tb_page_addr_t *)ap; 794 tb_page_addr_t b = *(const tb_page_addr_t *)bp; 795 796 if (a == b) { 797 return 0; 798 } else if (a < b) { 799 return -1; 800 } 801 return 1; 802 } 803 804 /* 805 * Lock a range of pages ([@start,@end[) as well as the pages of all 806 * intersecting TBs. 807 * Locking order: acquire locks in ascending order of page index. 808 */ 809 struct page_collection * 810 page_collection_lock(tb_page_addr_t start, tb_page_addr_t end) 811 { 812 struct page_collection *set = g_malloc(sizeof(*set)); 813 tb_page_addr_t index; 814 PageDesc *pd; 815 816 start >>= TARGET_PAGE_BITS; 817 end >>= TARGET_PAGE_BITS; 818 g_assert(start <= end); 819 820 set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL, 821 page_entry_destroy); 822 set->max = NULL; 823 assert_no_pages_locked(); 824 825 retry: 826 g_tree_foreach(set->tree, page_entry_lock, NULL); 827 828 for (index = start; index <= end; index++) { 829 TranslationBlock *tb; 830 int n; 831 832 pd = page_find(index); 833 if (pd == NULL) { 834 continue; 835 } 836 if (page_trylock_add(set, index << TARGET_PAGE_BITS)) { 837 g_tree_foreach(set->tree, page_entry_unlock, NULL); 838 goto retry; 839 } 840 assert_page_locked(pd); 841 PAGE_FOR_EACH_TB(pd, tb, n) { 842 if (page_trylock_add(set, tb->page_addr[0]) || 843 (tb->page_addr[1] != -1 && 844 page_trylock_add(set, tb->page_addr[1]))) { 845 /* drop all locks, and reacquire in order */ 846 g_tree_foreach(set->tree, page_entry_unlock, NULL); 847 goto retry; 848 } 849 } 850 } 851 return set; 852 } 853 854 void page_collection_unlock(struct page_collection *set) 855 { 856 /* entries are unlocked and freed via page_entry_destroy */ 857 g_tree_destroy(set->tree); 858 g_free(set); 859 } 860 861 #endif /* !CONFIG_USER_ONLY */ 862 863 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1, 864 PageDesc **ret_p2, tb_page_addr_t phys2, int alloc) 865 { 866 PageDesc *p1, *p2; 867 tb_page_addr_t page1; 868 tb_page_addr_t page2; 869 870 assert_memory_lock(); 871 g_assert(phys1 != -1); 872 873 page1 = phys1 >> TARGET_PAGE_BITS; 874 page2 = phys2 >> TARGET_PAGE_BITS; 875 876 p1 = page_find_alloc(page1, alloc); 877 if (ret_p1) { 878 *ret_p1 = p1; 879 } 880 if (likely(phys2 == -1)) { 881 page_lock(p1); 882 return; 883 } else if (page1 == page2) { 884 page_lock(p1); 885 if (ret_p2) { 886 *ret_p2 = p1; 887 } 888 return; 889 } 890 p2 = page_find_alloc(page2, alloc); 891 if (ret_p2) { 892 *ret_p2 = p2; 893 } 894 if (page1 < page2) { 895 page_lock(p1); 896 page_lock(p2); 897 } else { 898 page_lock(p2); 899 page_lock(p1); 900 } 901 } 902 903 /* Minimum size of the code gen buffer. This number is randomly chosen, 904 but not so small that we can't have a fair number of TB's live. */ 905 #define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB) 906 907 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise 908 indicated, this is constrained by the range of direct branches on the 909 host cpu, as used by the TCG implementation of goto_tb. 
*/ 910 #if defined(__x86_64__) 911 # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) 912 #elif defined(__sparc__) 913 # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) 914 #elif defined(__powerpc64__) 915 # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) 916 #elif defined(__powerpc__) 917 # define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB) 918 #elif defined(__aarch64__) 919 # define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB) 920 #elif defined(__s390x__) 921 /* We have a +- 4GB range on the branches; leave some slop. */ 922 # define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB) 923 #elif defined(__mips__) 924 /* We have a 256MB branch region, but leave room to make sure the 925 main executable is also within that region. */ 926 # define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB) 927 #else 928 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) 929 #endif 930 931 #if TCG_TARGET_REG_BITS == 32 932 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB) 933 #ifdef CONFIG_USER_ONLY 934 /* 935 * For user mode on smaller 32 bit systems we may run into trouble 936 * allocating big chunks of data in the right place. On these systems 937 * we utilise a static code generation buffer directly in the binary. 938 */ 939 #define USE_STATIC_CODE_GEN_BUFFER 940 #endif 941 #else /* TCG_TARGET_REG_BITS == 64 */ 942 #ifdef CONFIG_USER_ONLY 943 /* 944 * As user-mode emulation typically means running multiple instances 945 * of the translator don't go too nuts with our default code gen 946 * buffer lest we make things too hard for the OS. 947 */ 948 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB) 949 #else 950 /* 951 * We expect most system emulation to run one or two guests per host. 952 * Users running large scale system emulation may want to tweak their 953 * runtime setup via the tb-size control on the command line. 954 */ 955 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB) 956 #endif 957 #endif 958 959 #define DEFAULT_CODE_GEN_BUFFER_SIZE \ 960 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ 961 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) 962 963 static size_t size_code_gen_buffer(size_t tb_size) 964 { 965 /* Size the buffer. */ 966 if (tb_size == 0) { 967 size_t phys_mem = qemu_get_host_physmem(); 968 if (phys_mem == 0) { 969 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; 970 } else { 971 tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8); 972 } 973 } 974 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { 975 tb_size = MIN_CODE_GEN_BUFFER_SIZE; 976 } 977 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { 978 tb_size = MAX_CODE_GEN_BUFFER_SIZE; 979 } 980 return tb_size; 981 } 982 983 #ifdef __mips__ 984 /* In order to use J and JAL within the code_gen_buffer, we require 985 that the buffer not cross a 256MB boundary. */ 986 static inline bool cross_256mb(void *addr, size_t size) 987 { 988 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful; 989 } 990 991 /* We weren't able to allocate a buffer without crossing that boundary, 992 so make do with the larger portion of the buffer that doesn't cross. 993 Returns the new base of the buffer, and adjusts code_gen_buffer_size. 
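
   A worked example with illustrative addresses: buf1 = 0x0ffc0000 with
   size1 = 0x00140000 ends at 0x10100000 and crosses the 256MB boundary
   at 0x10000000.  Then buf2 = 0x10000000 and size2 = 0x00100000, while
   the piece below the boundary is only 0x00040000 bytes, so the larger
   upper piece wins: the function returns 0x10000000 and sets
   code_gen_buffer_size to 0x00100000.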
*/ 994 static inline void *split_cross_256mb(void *buf1, size_t size1) 995 { 996 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful); 997 size_t size2 = buf1 + size1 - buf2; 998 999 size1 = buf2 - buf1; 1000 if (size1 < size2) { 1001 size1 = size2; 1002 buf1 = buf2; 1003 } 1004 1005 tcg_ctx->code_gen_buffer_size = size1; 1006 return buf1; 1007 } 1008 #endif 1009 1010 #ifdef USE_STATIC_CODE_GEN_BUFFER 1011 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] 1012 __attribute__((aligned(CODE_GEN_ALIGN))); 1013 1014 static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp) 1015 { 1016 void *buf, *end; 1017 size_t size; 1018 1019 if (splitwx > 0) { 1020 error_setg(errp, "jit split-wx not supported"); 1021 return false; 1022 } 1023 1024 /* page-align the beginning and end of the buffer */ 1025 buf = static_code_gen_buffer; 1026 end = static_code_gen_buffer + sizeof(static_code_gen_buffer); 1027 buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size); 1028 end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size); 1029 1030 size = end - buf; 1031 1032 /* Honor a command-line option limiting the size of the buffer. */ 1033 if (size > tb_size) { 1034 size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size); 1035 } 1036 tcg_ctx->code_gen_buffer_size = size; 1037 1038 #ifdef __mips__ 1039 if (cross_256mb(buf, size)) { 1040 buf = split_cross_256mb(buf, size); 1041 size = tcg_ctx->code_gen_buffer_size; 1042 } 1043 #endif 1044 1045 if (qemu_mprotect_rwx(buf, size)) { 1046 error_setg_errno(errp, errno, "mprotect of jit buffer"); 1047 return false; 1048 } 1049 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); 1050 1051 tcg_ctx->code_gen_buffer = buf; 1052 return true; 1053 } 1054 #elif defined(_WIN32) 1055 static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp) 1056 { 1057 void *buf; 1058 1059 if (splitwx > 0) { 1060 error_setg(errp, "jit split-wx not supported"); 1061 return false; 1062 } 1063 1064 buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, 1065 PAGE_EXECUTE_READWRITE); 1066 if (buf == NULL) { 1067 error_setg_win32(errp, GetLastError(), 1068 "allocate %zu bytes for jit buffer", size); 1069 return false; 1070 } 1071 1072 tcg_ctx->code_gen_buffer = buf; 1073 tcg_ctx->code_gen_buffer_size = size; 1074 return true; 1075 } 1076 #else 1077 static bool alloc_code_gen_buffer_anon(size_t size, int prot, 1078 int flags, Error **errp) 1079 { 1080 void *buf; 1081 1082 buf = mmap(NULL, size, prot, flags, -1, 0); 1083 if (buf == MAP_FAILED) { 1084 error_setg_errno(errp, errno, 1085 "allocate %zu bytes for jit buffer", size); 1086 return false; 1087 } 1088 tcg_ctx->code_gen_buffer_size = size; 1089 1090 #ifdef __mips__ 1091 if (cross_256mb(buf, size)) { 1092 /* 1093 * Try again, with the original still mapped, to avoid re-acquiring 1094 * the same 256mb crossing. 1095 */ 1096 size_t size2; 1097 void *buf2 = mmap(NULL, size, prot, flags, -1, 0); 1098 switch ((int)(buf2 != MAP_FAILED)) { 1099 case 1: 1100 if (!cross_256mb(buf2, size)) { 1101 /* Success! Use the new buffer. */ 1102 munmap(buf, size); 1103 break; 1104 } 1105 /* Failure. Work with what we had. */ 1106 munmap(buf2, size); 1107 /* fallthru */ 1108 default: 1109 /* Split the original buffer. Free the smaller half. 
*/ 1110 buf2 = split_cross_256mb(buf, size); 1111 size2 = tcg_ctx->code_gen_buffer_size; 1112 if (buf == buf2) { 1113 munmap(buf + size2, size - size2); 1114 } else { 1115 munmap(buf, size - size2); 1116 } 1117 size = size2; 1118 break; 1119 } 1120 buf = buf2; 1121 } 1122 #endif 1123 1124 /* Request large pages for the buffer. */ 1125 qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE); 1126 1127 tcg_ctx->code_gen_buffer = buf; 1128 return true; 1129 } 1130 1131 #ifndef CONFIG_TCG_INTERPRETER 1132 #ifdef CONFIG_POSIX 1133 #include "qemu/memfd.h" 1134 1135 static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp) 1136 { 1137 void *buf_rw = NULL, *buf_rx = MAP_FAILED; 1138 int fd = -1; 1139 1140 #ifdef __mips__ 1141 /* Find space for the RX mapping, vs the 256MiB regions. */ 1142 if (!alloc_code_gen_buffer_anon(size, PROT_NONE, 1143 MAP_PRIVATE | MAP_ANONYMOUS | 1144 MAP_NORESERVE, errp)) { 1145 return false; 1146 } 1147 /* The size of the mapping may have been adjusted. */ 1148 size = tcg_ctx->code_gen_buffer_size; 1149 buf_rx = tcg_ctx->code_gen_buffer; 1150 #endif 1151 1152 buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp); 1153 if (buf_rw == NULL) { 1154 goto fail; 1155 } 1156 1157 #ifdef __mips__ 1158 void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC, 1159 MAP_SHARED | MAP_FIXED, fd, 0); 1160 if (tmp != buf_rx) { 1161 goto fail_rx; 1162 } 1163 #else 1164 buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0); 1165 if (buf_rx == MAP_FAILED) { 1166 goto fail_rx; 1167 } 1168 #endif 1169 1170 close(fd); 1171 tcg_ctx->code_gen_buffer = buf_rw; 1172 tcg_ctx->code_gen_buffer_size = size; 1173 tcg_splitwx_diff = buf_rx - buf_rw; 1174 1175 /* Request large pages for the buffer and the splitwx. */ 1176 qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE); 1177 qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE); 1178 return true; 1179 1180 fail_rx: 1181 error_setg_errno(errp, errno, "failed to map shared memory for execute"); 1182 fail: 1183 if (buf_rx != MAP_FAILED) { 1184 munmap(buf_rx, size); 1185 } 1186 if (buf_rw) { 1187 munmap(buf_rw, size); 1188 } 1189 if (fd >= 0) { 1190 close(fd); 1191 } 1192 return false; 1193 } 1194 #endif /* CONFIG_POSIX */ 1195 1196 #ifdef CONFIG_DARWIN 1197 #include <mach/mach.h> 1198 1199 extern kern_return_t mach_vm_remap(vm_map_t target_task, 1200 mach_vm_address_t *target_address, 1201 mach_vm_size_t size, 1202 mach_vm_offset_t mask, 1203 int flags, 1204 vm_map_t src_task, 1205 mach_vm_address_t src_address, 1206 boolean_t copy, 1207 vm_prot_t *cur_protection, 1208 vm_prot_t *max_protection, 1209 vm_inherit_t inheritance); 1210 1211 static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp) 1212 { 1213 kern_return_t ret; 1214 mach_vm_address_t buf_rw, buf_rx; 1215 vm_prot_t cur_prot, max_prot; 1216 1217 /* Map the read-write portion via normal anon memory. */ 1218 if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE, 1219 MAP_PRIVATE | MAP_ANONYMOUS, errp)) { 1220 return false; 1221 } 1222 1223 buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer; 1224 buf_rx = 0; 1225 ret = mach_vm_remap(mach_task_self(), 1226 &buf_rx, 1227 size, 1228 0, 1229 VM_FLAGS_ANYWHERE, 1230 mach_task_self(), 1231 buf_rw, 1232 false, 1233 &cur_prot, 1234 &max_prot, 1235 VM_INHERIT_NONE); 1236 if (ret != KERN_SUCCESS) { 1237 /* TODO: Convert "ret" to a human readable error message. 
*/ 1238 error_setg(errp, "vm_remap for jit splitwx failed"); 1239 munmap((void *)buf_rw, size); 1240 return false; 1241 } 1242 1243 if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) { 1244 error_setg_errno(errp, errno, "mprotect for jit splitwx"); 1245 munmap((void *)buf_rx, size); 1246 munmap((void *)buf_rw, size); 1247 return false; 1248 } 1249 1250 tcg_splitwx_diff = buf_rx - buf_rw; 1251 return true; 1252 } 1253 #endif /* CONFIG_DARWIN */ 1254 #endif /* CONFIG_TCG_INTERPRETER */ 1255 1256 static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp) 1257 { 1258 #ifndef CONFIG_TCG_INTERPRETER 1259 # ifdef CONFIG_DARWIN 1260 return alloc_code_gen_buffer_splitwx_vmremap(size, errp); 1261 # endif 1262 # ifdef CONFIG_POSIX 1263 return alloc_code_gen_buffer_splitwx_memfd(size, errp); 1264 # endif 1265 #endif 1266 error_setg(errp, "jit split-wx not supported"); 1267 return false; 1268 } 1269 1270 static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp) 1271 { 1272 ERRP_GUARD(); 1273 int prot, flags; 1274 1275 if (splitwx) { 1276 if (alloc_code_gen_buffer_splitwx(size, errp)) { 1277 return true; 1278 } 1279 /* 1280 * If splitwx force-on (1), fail; 1281 * if splitwx default-on (-1), fall through to splitwx off. 1282 */ 1283 if (splitwx > 0) { 1284 return false; 1285 } 1286 error_free_or_abort(errp); 1287 } 1288 1289 prot = PROT_READ | PROT_WRITE | PROT_EXEC; 1290 flags = MAP_PRIVATE | MAP_ANONYMOUS; 1291 #ifdef CONFIG_TCG_INTERPRETER 1292 /* The tcg interpreter does not need execute permission. */ 1293 prot = PROT_READ | PROT_WRITE; 1294 #elif defined(CONFIG_DARWIN) 1295 /* Applicable to both iOS and macOS (Apple Silicon). */ 1296 if (!splitwx) { 1297 flags |= MAP_JIT; 1298 } 1299 #endif 1300 1301 return alloc_code_gen_buffer_anon(size, prot, flags, errp); 1302 } 1303 #endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */ 1304 1305 static bool tb_cmp(const void *ap, const void *bp) 1306 { 1307 const TranslationBlock *a = ap; 1308 const TranslationBlock *b = bp; 1309 1310 return a->pc == b->pc && 1311 a->cs_base == b->cs_base && 1312 a->flags == b->flags && 1313 (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) && 1314 a->trace_vcpu_dstate == b->trace_vcpu_dstate && 1315 a->page_addr[0] == b->page_addr[0] && 1316 a->page_addr[1] == b->page_addr[1]; 1317 } 1318 1319 static void tb_htable_init(void) 1320 { 1321 unsigned int mode = QHT_MODE_AUTO_RESIZE; 1322 1323 qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode); 1324 } 1325 1326 /* Must be called before using the QEMU cpus. 'tb_size' is the size 1327 (in bytes) allocated to the translation buffer. Zero means default 1328 size. */ 1329 void tcg_exec_init(unsigned long tb_size, int splitwx) 1330 { 1331 bool ok; 1332 1333 tcg_allowed = true; 1334 cpu_gen_init(); 1335 page_init(); 1336 tb_htable_init(); 1337 1338 ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size), 1339 splitwx, &error_fatal); 1340 assert(ok); 1341 1342 #if defined(CONFIG_SOFTMMU) 1343 /* There's no guest base to take into account, so go ahead and 1344 initialize the prologue now. */ 1345 tcg_prologue_init(tcg_ctx); 1346 #endif 1347 } 1348 1349 /* call with @p->lock held */ 1350 static inline void invalidate_page_bitmap(PageDesc *p) 1351 { 1352 assert_page_locked(p); 1353 #ifdef CONFIG_SOFTMMU 1354 g_free(p->code_bitmap); 1355 p->code_bitmap = NULL; 1356 p->code_write_count = 0; 1357 #endif 1358 } 1359 1360 /* Set to NULL all the 'first_tb' fields in all PageDescs. 
 */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            invalidate_page_bitmap(pd + i);
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    size_t *size = data;

    *size += tb->tc.size;
    return false;
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    bool did_flush = false;

    mmap_lock();
    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }
    did_flush = true;

    if (DEBUG_TB_FLUSH_GATE) {
        size_t nb_tbs = tcg_nb_tbs();
        size_t host_size = 0;

        tcg_tb_foreach(tb_host_size_iter, &host_size);
        printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
               tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
    }

    CPU_FOREACH(cpu) {
        cpu_tb_jmp_cache_clear(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
    if (did_flush) {
        qemu_plugin_flush_cb();
    }
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);

        if (cpu_in_exclusive_context(cpu)) {
            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
        } else {
            async_safe_run_on_cpu(cpu, do_tb_flush,
                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
        }
    }
}

/*
 * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
 * so in order to prevent bit rot we compile them unconditionally in user-mode,
 * and let the optimizer get rid of them by wrapping their user-only callers
 * with if (DEBUG_TB_CHECK_GATE).
 */
#ifdef CONFIG_USER_ONLY

static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with mmap_lock held.
1482 */ 1483 static void tb_invalidate_check(target_ulong address) 1484 { 1485 address &= TARGET_PAGE_MASK; 1486 qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address); 1487 } 1488 1489 static void do_tb_page_check(void *p, uint32_t hash, void *userp) 1490 { 1491 TranslationBlock *tb = p; 1492 int flags1, flags2; 1493 1494 flags1 = page_get_flags(tb->pc); 1495 flags2 = page_get_flags(tb->pc + tb->size - 1); 1496 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { 1497 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", 1498 (long)tb->pc, tb->size, flags1, flags2); 1499 } 1500 } 1501 1502 /* verify that all the pages have correct rights for code */ 1503 static void tb_page_check(void) 1504 { 1505 qht_iter(&tb_ctx.htable, do_tb_page_check, NULL); 1506 } 1507 1508 #endif /* CONFIG_USER_ONLY */ 1509 1510 /* 1511 * user-mode: call with mmap_lock held 1512 * !user-mode: call with @pd->lock held 1513 */ 1514 static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb) 1515 { 1516 TranslationBlock *tb1; 1517 uintptr_t *pprev; 1518 unsigned int n1; 1519 1520 assert_page_locked(pd); 1521 pprev = &pd->first_tb; 1522 PAGE_FOR_EACH_TB(pd, tb1, n1) { 1523 if (tb1 == tb) { 1524 *pprev = tb1->page_next[n1]; 1525 return; 1526 } 1527 pprev = &tb1->page_next[n1]; 1528 } 1529 g_assert_not_reached(); 1530 } 1531 1532 /* remove @orig from its @n_orig-th jump list */ 1533 static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) 1534 { 1535 uintptr_t ptr, ptr_locked; 1536 TranslationBlock *dest; 1537 TranslationBlock *tb; 1538 uintptr_t *pprev; 1539 int n; 1540 1541 /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ 1542 ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1); 1543 dest = (TranslationBlock *)(ptr & ~1); 1544 if (dest == NULL) { 1545 return; 1546 } 1547 1548 qemu_spin_lock(&dest->jmp_lock); 1549 /* 1550 * While acquiring the lock, the jump might have been removed if the 1551 * destination TB was invalidated; check again. 1552 */ 1553 ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]); 1554 if (ptr_locked != ptr) { 1555 qemu_spin_unlock(&dest->jmp_lock); 1556 /* 1557 * The only possibility is that the jump was unlinked via 1558 * tb_jump_unlink(dest). Seeing here another destination would be a bug, 1559 * because we set the LSB above. 1560 */ 1561 g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID); 1562 return; 1563 } 1564 /* 1565 * We first acquired the lock, and since the destination pointer matches, 1566 * we know for sure that @orig is in the jmp list. 
1567 */ 1568 pprev = &dest->jmp_list_head; 1569 TB_FOR_EACH_JMP(dest, tb, n) { 1570 if (tb == orig && n == n_orig) { 1571 *pprev = tb->jmp_list_next[n]; 1572 /* no need to set orig->jmp_dest[n]; setting the LSB was enough */ 1573 qemu_spin_unlock(&dest->jmp_lock); 1574 return; 1575 } 1576 pprev = &tb->jmp_list_next[n]; 1577 } 1578 g_assert_not_reached(); 1579 } 1580 1581 /* reset the jump entry 'n' of a TB so that it is not chained to 1582 another TB */ 1583 static inline void tb_reset_jump(TranslationBlock *tb, int n) 1584 { 1585 uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]); 1586 tb_set_jmp_target(tb, n, addr); 1587 } 1588 1589 /* remove any jumps to the TB */ 1590 static inline void tb_jmp_unlink(TranslationBlock *dest) 1591 { 1592 TranslationBlock *tb; 1593 int n; 1594 1595 qemu_spin_lock(&dest->jmp_lock); 1596 1597 TB_FOR_EACH_JMP(dest, tb, n) { 1598 tb_reset_jump(tb, n); 1599 qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); 1600 /* No need to clear the list entry; setting the dest ptr is enough */ 1601 } 1602 dest->jmp_list_head = (uintptr_t)NULL; 1603 1604 qemu_spin_unlock(&dest->jmp_lock); 1605 } 1606 1607 /* 1608 * In user-mode, call with mmap_lock held. 1609 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages' 1610 * locks held. 1611 */ 1612 static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) 1613 { 1614 CPUState *cpu; 1615 PageDesc *p; 1616 uint32_t h; 1617 tb_page_addr_t phys_pc; 1618 uint32_t orig_cflags = tb_cflags(tb); 1619 1620 assert_memory_lock(); 1621 1622 /* make sure no further incoming jumps will be chained to this TB */ 1623 qemu_spin_lock(&tb->jmp_lock); 1624 qatomic_set(&tb->cflags, tb->cflags | CF_INVALID); 1625 qemu_spin_unlock(&tb->jmp_lock); 1626 1627 /* remove the TB from the hash list */ 1628 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); 1629 h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags, 1630 tb->trace_vcpu_dstate); 1631 if (!qht_remove(&tb_ctx.htable, tb, h)) { 1632 return; 1633 } 1634 1635 /* remove the TB from the page list */ 1636 if (rm_from_page_list) { 1637 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); 1638 tb_page_remove(p, tb); 1639 invalidate_page_bitmap(p); 1640 if (tb->page_addr[1] != -1) { 1641 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); 1642 tb_page_remove(p, tb); 1643 invalidate_page_bitmap(p); 1644 } 1645 } 1646 1647 /* remove the TB from the hash list */ 1648 h = tb_jmp_cache_hash_func(tb->pc); 1649 CPU_FOREACH(cpu) { 1650 if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) { 1651 qatomic_set(&cpu->tb_jmp_cache[h], NULL); 1652 } 1653 } 1654 1655 /* suppress this TB from the two jump lists */ 1656 tb_remove_from_jmp_list(tb, 0); 1657 tb_remove_from_jmp_list(tb, 1); 1658 1659 /* suppress any remaining jumps to this TB */ 1660 tb_jmp_unlink(tb); 1661 1662 qatomic_set(&tcg_ctx->tb_phys_invalidate_count, 1663 tcg_ctx->tb_phys_invalidate_count + 1); 1664 } 1665 1666 static void tb_phys_invalidate__locked(TranslationBlock *tb) 1667 { 1668 qemu_thread_jit_write(); 1669 do_tb_phys_invalidate(tb, true); 1670 qemu_thread_jit_execute(); 1671 } 1672 1673 /* invalidate one TB 1674 * 1675 * Called with mmap_lock held in user-mode. 
1676 */ 1677 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) 1678 { 1679 if (page_addr == -1 && tb->page_addr[0] != -1) { 1680 page_lock_tb(tb); 1681 do_tb_phys_invalidate(tb, true); 1682 page_unlock_tb(tb); 1683 } else { 1684 do_tb_phys_invalidate(tb, false); 1685 } 1686 } 1687 1688 #ifdef CONFIG_SOFTMMU 1689 /* call with @p->lock held */ 1690 static void build_page_bitmap(PageDesc *p) 1691 { 1692 int n, tb_start, tb_end; 1693 TranslationBlock *tb; 1694 1695 assert_page_locked(p); 1696 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE); 1697 1698 PAGE_FOR_EACH_TB(p, tb, n) { 1699 /* NOTE: this is subtle as a TB may span two physical pages */ 1700 if (n == 0) { 1701 /* NOTE: tb_end may be after the end of the page, but 1702 it is not a problem */ 1703 tb_start = tb->pc & ~TARGET_PAGE_MASK; 1704 tb_end = tb_start + tb->size; 1705 if (tb_end > TARGET_PAGE_SIZE) { 1706 tb_end = TARGET_PAGE_SIZE; 1707 } 1708 } else { 1709 tb_start = 0; 1710 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); 1711 } 1712 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start); 1713 } 1714 } 1715 #endif 1716 1717 /* add the tb in the target page and protect it if necessary 1718 * 1719 * Called with mmap_lock held for user-mode emulation. 1720 * Called with @p->lock held in !user-mode. 1721 */ 1722 static inline void tb_page_add(PageDesc *p, TranslationBlock *tb, 1723 unsigned int n, tb_page_addr_t page_addr) 1724 { 1725 #ifndef CONFIG_USER_ONLY 1726 bool page_already_protected; 1727 #endif 1728 1729 assert_page_locked(p); 1730 1731 tb->page_addr[n] = page_addr; 1732 tb->page_next[n] = p->first_tb; 1733 #ifndef CONFIG_USER_ONLY 1734 page_already_protected = p->first_tb != (uintptr_t)NULL; 1735 #endif 1736 p->first_tb = (uintptr_t)tb | n; 1737 invalidate_page_bitmap(p); 1738 1739 #if defined(CONFIG_USER_ONLY) 1740 if (p->flags & PAGE_WRITE) { 1741 target_ulong addr; 1742 PageDesc *p2; 1743 int prot; 1744 1745 /* force the host page as non writable (writes will have a 1746 page fault + mprotect overhead) */ 1747 page_addr &= qemu_host_page_mask; 1748 prot = 0; 1749 for (addr = page_addr; addr < page_addr + qemu_host_page_size; 1750 addr += TARGET_PAGE_SIZE) { 1751 1752 p2 = page_find(addr >> TARGET_PAGE_BITS); 1753 if (!p2) { 1754 continue; 1755 } 1756 prot |= p2->flags; 1757 p2->flags &= ~PAGE_WRITE; 1758 } 1759 mprotect(g2h_untagged(page_addr), qemu_host_page_size, 1760 (prot & PAGE_BITS) & ~PAGE_WRITE); 1761 if (DEBUG_TB_INVALIDATE_GATE) { 1762 printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr); 1763 } 1764 } 1765 #else 1766 /* if some code is already present, then the pages are already 1767 protected. So we handle the case where only the first TB is 1768 allocated in a physical page */ 1769 if (!page_already_protected) { 1770 tlb_protect_code(page_addr); 1771 } 1772 #endif 1773 } 1774 1775 /* 1776 * Add a new TB and link it to the physical page tables. phys_page2 is 1777 * (-1) to indicate that only one page contains the TB. 1778 * 1779 * Called with mmap_lock held for user-mode emulation. 1780 * 1781 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb. 1782 * Note that in !user-mode, another thread might have already added a TB 1783 * for the same block of guest code that @tb corresponds to. In that case, 1784 * the caller should discard the original @tb, and use instead the returned TB. 
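 *
 * A sketch of the expected caller pattern (tb_gen_code() below does
 * essentially this, also rolling back code_gen_ptr):
 *
 *     existing = tb_link_page(tb, phys_pc, phys_page2);
 *     if (existing != tb) {
 *         // another thread won the race: drop our copy and use the
 *         // TB that is already linked into the page lists / hash table
 *         tb_destroy(tb);
 *         tb = existing;
 *     }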
1785 */ 1786 static TranslationBlock * 1787 tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, 1788 tb_page_addr_t phys_page2) 1789 { 1790 PageDesc *p; 1791 PageDesc *p2 = NULL; 1792 void *existing_tb = NULL; 1793 uint32_t h; 1794 1795 assert_memory_lock(); 1796 tcg_debug_assert(!(tb->cflags & CF_INVALID)); 1797 1798 /* 1799 * Add the TB to the page list, acquiring first the pages's locks. 1800 * We keep the locks held until after inserting the TB in the hash table, 1801 * so that if the insertion fails we know for sure that the TBs are still 1802 * in the page descriptors. 1803 * Note that inserting into the hash table first isn't an option, since 1804 * we can only insert TBs that are fully initialized. 1805 */ 1806 page_lock_pair(&p, phys_pc, &p2, phys_page2, 1); 1807 tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK); 1808 if (p2) { 1809 tb_page_add(p2, tb, 1, phys_page2); 1810 } else { 1811 tb->page_addr[1] = -1; 1812 } 1813 1814 /* add in the hash table */ 1815 h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags, 1816 tb->trace_vcpu_dstate); 1817 qht_insert(&tb_ctx.htable, tb, h, &existing_tb); 1818 1819 /* remove TB from the page(s) if we couldn't insert it */ 1820 if (unlikely(existing_tb)) { 1821 tb_page_remove(p, tb); 1822 invalidate_page_bitmap(p); 1823 if (p2) { 1824 tb_page_remove(p2, tb); 1825 invalidate_page_bitmap(p2); 1826 } 1827 tb = existing_tb; 1828 } 1829 1830 if (p2 && p2 != p) { 1831 page_unlock(p2); 1832 } 1833 page_unlock(p); 1834 1835 #ifdef CONFIG_USER_ONLY 1836 if (DEBUG_TB_CHECK_GATE) { 1837 tb_page_check(); 1838 } 1839 #endif 1840 return tb; 1841 } 1842 1843 /* Called with mmap_lock held for user mode emulation. */ 1844 TranslationBlock *tb_gen_code(CPUState *cpu, 1845 target_ulong pc, target_ulong cs_base, 1846 uint32_t flags, int cflags) 1847 { 1848 CPUArchState *env = cpu->env_ptr; 1849 TranslationBlock *tb, *existing_tb; 1850 tb_page_addr_t phys_pc, phys_page2; 1851 target_ulong virt_page2; 1852 tcg_insn_unit *gen_code_buf; 1853 int gen_code_size, search_size, max_insns; 1854 #ifdef CONFIG_PROFILER 1855 TCGProfile *prof = &tcg_ctx->prof; 1856 int64_t ti; 1857 #endif 1858 1859 assert_memory_lock(); 1860 qemu_thread_jit_write(); 1861 1862 phys_pc = get_page_addr_code(env, pc); 1863 1864 if (phys_pc == -1) { 1865 /* Generate a one-shot TB with 1 insn in it */ 1866 cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1; 1867 } 1868 1869 max_insns = cflags & CF_COUNT_MASK; 1870 if (max_insns == 0) { 1871 max_insns = CF_COUNT_MASK; 1872 } 1873 if (max_insns > TCG_MAX_INSNS) { 1874 max_insns = TCG_MAX_INSNS; 1875 } 1876 if (cpu->singlestep_enabled || singlestep) { 1877 max_insns = 1; 1878 } 1879 1880 buffer_overflow: 1881 tb = tcg_tb_alloc(tcg_ctx); 1882 if (unlikely(!tb)) { 1883 /* flush must be done */ 1884 tb_flush(cpu); 1885 mmap_unlock(); 1886 /* Make the execution loop process the flush as soon as possible. 
*/ 1887 cpu->exception_index = EXCP_INTERRUPT; 1888 cpu_loop_exit(cpu); 1889 } 1890 1891 gen_code_buf = tcg_ctx->code_gen_ptr; 1892 tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf); 1893 tb->pc = pc; 1894 tb->cs_base = cs_base; 1895 tb->flags = flags; 1896 tb->cflags = cflags; 1897 tb->trace_vcpu_dstate = *cpu->trace_dstate; 1898 tcg_ctx->tb_cflags = cflags; 1899 tb_overflow: 1900 1901 #ifdef CONFIG_PROFILER 1902 /* includes aborted translations because of exceptions */ 1903 qatomic_set(&prof->tb_count1, prof->tb_count1 + 1); 1904 ti = profile_getclock(); 1905 #endif 1906 1907 gen_code_size = sigsetjmp(tcg_ctx->jmp_trans, 0); 1908 if (unlikely(gen_code_size != 0)) { 1909 goto error_return; 1910 } 1911 1912 tcg_func_start(tcg_ctx); 1913 1914 tcg_ctx->cpu = env_cpu(env); 1915 gen_intermediate_code(cpu, tb, max_insns); 1916 assert(tb->size != 0); 1917 tcg_ctx->cpu = NULL; 1918 max_insns = tb->icount; 1919 1920 trace_translate_block(tb, tb->pc, tb->tc.ptr); 1921 1922 /* generate machine code */ 1923 tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; 1924 tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID; 1925 tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset; 1926 if (TCG_TARGET_HAS_direct_jump) { 1927 tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg; 1928 tcg_ctx->tb_jmp_target_addr = NULL; 1929 } else { 1930 tcg_ctx->tb_jmp_insn_offset = NULL; 1931 tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg; 1932 } 1933 1934 #ifdef CONFIG_PROFILER 1935 qatomic_set(&prof->tb_count, prof->tb_count + 1); 1936 qatomic_set(&prof->interm_time, 1937 prof->interm_time + profile_getclock() - ti); 1938 ti = profile_getclock(); 1939 #endif 1940 1941 gen_code_size = tcg_gen_code(tcg_ctx, tb); 1942 if (unlikely(gen_code_size < 0)) { 1943 error_return: 1944 switch (gen_code_size) { 1945 case -1: 1946 /* 1947 * Overflow of code_gen_buffer, or the current slice of it. 1948 * 1949 * TODO: We don't need to re-do gen_intermediate_code, nor 1950 * should we re-do the tcg optimization currently hidden 1951 * inside tcg_gen_code. All that should be required is to 1952 * flush the TBs, allocate a new TB, re-initialize it per 1953 * above, and re-do the actual code generation. 1954 */ 1955 qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT, 1956 "Restarting code generation for " 1957 "code_gen_buffer overflow\n"); 1958 goto buffer_overflow; 1959 1960 case -2: 1961 /* 1962 * The code generated for the TranslationBlock is too large. 1963 * The maximum size allowed by the unwind info is 64k. 1964 * There may be stricter constraints from relocations 1965 * in the tcg backend. 1966 * 1967 * Try again with half as many insns as we attempted this time. 1968 * If a single insn overflows, there's a bug somewhere... 
1969 */ 1970 assert(max_insns > 1); 1971 max_insns /= 2; 1972 qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT, 1973 "Restarting code generation with " 1974 "smaller translation block (max %d insns)\n", 1975 max_insns); 1976 goto tb_overflow; 1977 1978 default: 1979 g_assert_not_reached(); 1980 } 1981 } 1982 search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size); 1983 if (unlikely(search_size < 0)) { 1984 goto buffer_overflow; 1985 } 1986 tb->tc.size = gen_code_size; 1987 1988 #ifdef CONFIG_PROFILER 1989 qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti); 1990 qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size); 1991 qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size); 1992 qatomic_set(&prof->search_out_len, prof->search_out_len + search_size); 1993 #endif 1994 1995 #ifdef DEBUG_DISAS 1996 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) && 1997 qemu_log_in_addr_range(tb->pc)) { 1998 FILE *logfile = qemu_log_lock(); 1999 int code_size, data_size; 2000 const tcg_target_ulong *rx_data_gen_ptr; 2001 size_t chunk_start; 2002 int insn = 0; 2003 2004 if (tcg_ctx->data_gen_ptr) { 2005 rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr); 2006 code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr; 2007 data_size = gen_code_size - code_size; 2008 } else { 2009 rx_data_gen_ptr = 0; 2010 code_size = gen_code_size; 2011 data_size = 0; 2012 } 2013 2014 /* Dump header and the first instruction */ 2015 qemu_log("OUT: [size=%d]\n", gen_code_size); 2016 qemu_log(" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n", 2017 tcg_ctx->gen_insn_data[insn][0]); 2018 chunk_start = tcg_ctx->gen_insn_end_off[insn]; 2019 log_disas(tb->tc.ptr, chunk_start); 2020 2021 /* 2022 * Dump each instruction chunk, wrapping up empty chunks into 2023 * the next instruction. The whole array is offset so the 2024 * first entry is the beginning of the 2nd instruction. 
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        FILE *logfile = qemu_log_lock();
        int code_size, data_size;
        const tcg_target_ulong *rx_data_gen_ptr;
        size_t chunk_start;
        int insn = 0;

        if (tcg_ctx->data_gen_ptr) {
            rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
            code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
            data_size = gen_code_size - code_size;
        } else {
            rx_data_gen_ptr = 0;
            code_size = gen_code_size;
            data_size = 0;
        }

        /* Dump header and the first instruction */
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        qemu_log(" -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
                 tcg_ctx->gen_insn_data[insn][0]);
        chunk_start = tcg_ctx->gen_insn_end_off[insn];
        log_disas(tb->tc.ptr, chunk_start);

        /*
         * Dump each instruction chunk, wrapping up empty chunks into
         * the next instruction. The whole array is offset so the
         * first entry is the beginning of the 2nd instruction.
         */
        while (insn < tb->icount) {
            size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
            if (chunk_end > chunk_start) {
                qemu_log(" -- guest addr 0x" TARGET_FMT_lx "\n",
                         tcg_ctx->gen_insn_data[insn][0]);
                log_disas(tb->tc.ptr + chunk_start, chunk_end - chunk_start);
                chunk_start = chunk_end;
            }
            insn++;
        }

        if (chunk_start < code_size) {
            qemu_log(" -- tb slow paths + alignment\n");
            log_disas(tb->tc.ptr + chunk_start, code_size - chunk_start);
        }

        /* Finally dump any data we may have after the block */
        if (data_size) {
            int i;
            qemu_log(" data: [size=%d]\n", data_size);
            for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ": .quad 0x%016" TCG_PRIlx "\n",
                             (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                } else if (sizeof(tcg_target_ulong) == 4) {
                    qemu_log("0x%08" PRIxPTR ": .long 0x%08" TCG_PRIlx "\n",
                             (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
                } else {
                    qemu_build_not_reached();
                }
            }
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock(logfile);
    }
#endif

    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN));

    /* init jump list */
    qemu_spin_init(&tb->jmp_lock);
    tb->jmp_list_head = (uintptr_t)NULL;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;
    tb->jmp_dest[0] = (uintptr_t)NULL;
    tb->jmp_dest[1] = (uintptr_t)NULL;

    /* init original jump addresses which have been set during tcg_gen_code() */
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 0);
    }
    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
        tb_reset_jump(tb, 1);
    }

    /*
     * If the TB is not associated with a physical RAM page then
     * it must be a temporary one-insn TB, and we have nothing to do
     * except fill in the page_addr[] fields. Return early before
     * attempting to link to other TBs or add to the lookup table.
     */
    if (phys_pc == -1) {
        tb->page_addr[0] = tb->page_addr[1] = -1;
        return tb;
    }

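    /*
     * Note: a TB may straddle a guest page boundary.  page_addr[0] covers
     * the page of the first insn; if the TB spills into the following
     * virtual page, that page's physical address is looked up here and
     * recorded as page_addr[1] by tb_link_page(), so a write to either
     * page invalidates the TB.
     */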
    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    /*
     * No explicit memory barrier is required -- tb_link_page() makes the
     * TB visible in a consistent state.
     */
    existing_tb = tb_link_page(tb, phys_pc, phys_page2);
    /* if the TB already exists, discard what we just translated */
    if (unlikely(existing_tb != tb)) {
        uintptr_t orig_aligned = (uintptr_t)gen_code_buf;

        orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize);
        qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned);
        tb_destroy(tb);
        return existing_tb;
    }
    tcg_tb_insert(tb);
    return tb;
}

/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    bool current_tb_not_found = retaddr != 0;
    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = false;
                /* now we have a real cpu fault */
                current_tb = tcg_tb_lookup(retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution. We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

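/*
 * Note: the __locked helper above relies on the caller holding the
 * relevant page locks (and, for user mode, mmap_lock).  Of the entry
 * points below, tb_invalidate_phys_page_range() and
 * tb_invalidate_phys_range() take the page locks themselves via
 * page_collection_lock(), while tb_invalidate_phys_page_fast() expects
 * an already-locked page_collection from its caller.
 */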
2216 * 2217 * Called with mmap_lock held for user-mode emulation 2218 */ 2219 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end) 2220 { 2221 struct page_collection *pages; 2222 PageDesc *p; 2223 2224 assert_memory_lock(); 2225 2226 p = page_find(start >> TARGET_PAGE_BITS); 2227 if (p == NULL) { 2228 return; 2229 } 2230 pages = page_collection_lock(start, end); 2231 tb_invalidate_phys_page_range__locked(pages, p, start, end, 0); 2232 page_collection_unlock(pages); 2233 } 2234 2235 /* 2236 * Invalidate all TBs which intersect with the target physical address range 2237 * [start;end[. NOTE: start and end may refer to *different* physical pages. 2238 * 'is_cpu_write_access' should be true if called from a real cpu write 2239 * access: the virtual CPU will exit the current TB if code is modified inside 2240 * this TB. 2241 * 2242 * Called with mmap_lock held for user-mode emulation. 2243 */ 2244 #ifdef CONFIG_SOFTMMU 2245 void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end) 2246 #else 2247 void tb_invalidate_phys_range(target_ulong start, target_ulong end) 2248 #endif 2249 { 2250 struct page_collection *pages; 2251 tb_page_addr_t next; 2252 2253 assert_memory_lock(); 2254 2255 pages = page_collection_lock(start, end); 2256 for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; 2257 start < end; 2258 start = next, next += TARGET_PAGE_SIZE) { 2259 PageDesc *pd = page_find(start >> TARGET_PAGE_BITS); 2260 tb_page_addr_t bound = MIN(next, end); 2261 2262 if (pd == NULL) { 2263 continue; 2264 } 2265 tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0); 2266 } 2267 page_collection_unlock(pages); 2268 } 2269 2270 #ifdef CONFIG_SOFTMMU 2271 /* len must be <= 8 and start must be a multiple of len. 2272 * Called via softmmu_template.h when code areas are written to with 2273 * iothread mutex not held. 2274 * 2275 * Call with all @pages in the range [@start, @start + len[ locked. 2276 */ 2277 void tb_invalidate_phys_page_fast(struct page_collection *pages, 2278 tb_page_addr_t start, int len, 2279 uintptr_t retaddr) 2280 { 2281 PageDesc *p; 2282 2283 assert_memory_lock(); 2284 2285 p = page_find(start >> TARGET_PAGE_BITS); 2286 if (!p) { 2287 return; 2288 } 2289 2290 assert_page_locked(p); 2291 if (!p->code_bitmap && 2292 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) { 2293 build_page_bitmap(p); 2294 } 2295 if (p->code_bitmap) { 2296 unsigned int nr; 2297 unsigned long b; 2298 2299 nr = start & ~TARGET_PAGE_MASK; 2300 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)); 2301 if (b & ((1 << len) - 1)) { 2302 goto do_invalidate; 2303 } 2304 } else { 2305 do_invalidate: 2306 tb_invalidate_phys_page_range__locked(pages, p, start, start + len, 2307 retaddr); 2308 } 2309 } 2310 #else 2311 /* Called with mmap_lock held. If pc is not 0 then it indicates the 2312 * host PC of the faulting store instruction that caused this invalidate. 2313 * Returns true if the caller needs to abort execution of the current 2314 * TB (because it was modified by this store and the guest CPU has 2315 * precise-SMC semantics). 
    if (p->code_bitmap) {
        unsigned int nr;
        unsigned long b;

        nr = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
        if (b & ((1 << len) - 1)) {
            goto do_invalidate;
        }
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
                                              retaddr);
    }
}
#else
/* Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    uint32_t current_flags = 0;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (p->first_tb && pc != 0) {
        current_tb = tcg_tb_lookup(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    assert_page_locked(p);
    PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc, true);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
    }
    p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | curr_cflags(cpu);
        return true;
    }
#endif

    return false;
}
#endif

/* user-mode: call with mmap_lock held */
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;

    assert_memory_lock();

    tb = tcg_tb_lookup(retaddr);
    if (tb) {
        /* We can use retranslation to find the PC. */
        cpu_restore_state_from_tb(cpu, tb, retaddr, true);
        tb_phys_invalidate(tb, -1);
    } else {
        /* The exception probably happened in a helper. The CPU state should
           have been saved before calling it. Fetch the PC from there. */
        CPUArchState *env = cpu->env_ptr;
        target_ulong pc, cs_base;
        tb_page_addr_t addr;
        uint32_t flags;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
        addr = get_page_addr_code(env, pc);
        if (addr != -1) {
            tb_invalidate_phys_range(addr, addr + 1);
        }
    }
}

#ifndef CONFIG_USER_ONLY
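/*
 * Note on cpu_io_recompile() below: with icount the instruction budget
 * for a TB is charged when the TB is entered, so an insn that performs
 * device I/O before the TB's final insn could observe an inconsistent
 * virtual clock.  The function rewinds the CPU state to the offending
 * insn and arranges (via cflags_next_tb) for it to be re-executed as
 * the last insn of a short TB.
 */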
/*
 * In deterministic execution mode, instructions doing device I/Os
 * must be at the end of the TB.
 *
 * Called by softmmu_template.h, with iothread mutex not held.
 */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    CPUClass *cc;
    uint32_t n;

    tb = tcg_tb_lookup(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    cpu_restore_state_from_tb(cpu, tb, retaddr, true);

    /*
     * Some guests must re-execute the branch when re-executing a delay
     * slot instruction. When this is the case, adjust icount and N
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = CPU_GET_CLASS(cpu);
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu_neg(cpu)->icount_decr.u16.low++;
        n = 2;
    }

    /*
     * Exit the loop and potentially generate a new TB executing just
     * the I/O insns. We also limit instrumentation to memory
     * operations only (which execute after completion) so we don't
     * double instrument the instruction.
     */
    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "cpu_io_recompile: rewound execution of TB to "
                           TARGET_FMT_lx "\n", tb->pc);

    cpu_loop_exit_noexc(cpu);
}

static void print_qht_statistics(struct qht_stats hst)
{
    uint32_t hgram_opts;
    size_t hgram_bins;
    char *hgram;

    if (!hst.head_buckets) {
        return;
    }
    qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
                hst.used_head_buckets, hst.head_buckets,
                (double)hst.used_head_buckets / hst.head_buckets * 100);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
        hgram_opts |= QDIST_PR_NODECIMAL;
    }
    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
    qemu_printf("TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
                qdist_avg(&hst.occupancy) * 100, hgram);
    g_free(hgram);

    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
    if (hgram_bins > 10) {
        hgram_bins = 10;
    } else {
        hgram_bins = 0;
        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
    }
    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
    qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n",
                qdist_avg(&hst.chain), hgram);
    g_free(hgram);
}

struct tb_tree_stats {
    size_t nb_tbs;
    size_t host_size;
    size_t target_size;
    size_t max_target_size;
    size_t direct_jmp_count;
    size_t direct_jmp2_count;
    size_t cross_page;
};

static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
{
    const TranslationBlock *tb = value;
    struct tb_tree_stats *tst = data;

    tst->nb_tbs++;
    tst->host_size += tb->tc.size;
    tst->target_size += tb->size;
    if (tb->size > tst->max_target_size) {
        tst->max_target_size = tb->size;
    }
    if (tb->page_addr[1] != -1) {
        tst->cross_page++;
    }
    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
        tst->direct_jmp_count++;
        if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
            tst->direct_jmp2_count++;
        }
    }
    return false;
}

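/*
 * Note: dump_exec_info() and dump_opcount_info() below are what the HMP
 * monitor commands "info jit" and "info opcount" print.
 */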
void dump_exec_info(void)
{
    struct tb_tree_stats tst = {};
    struct qht_stats hst;
    size_t nb_tbs, flush_full, flush_part, flush_elide;

    tcg_tb_foreach(tb_tree_stats_iter, &tst);
    nb_tbs = tst.nb_tbs;
    /* XXX: avoid using doubles ? */
    qemu_printf("Translation buffer state:\n");
    /*
     * Report total code size including the padding and TB structs;
     * otherwise users might think "-accel tcg,tb-size" is not honoured.
     * For avg host size we use the precise numbers from tb_tree_stats though.
     */
    qemu_printf("gen code size %zu/%zu\n",
                tcg_code_size(), tcg_code_capacity());
    qemu_printf("TB count %zu\n", nb_tbs);
    qemu_printf("TB avg target size %zu max=%zu bytes\n",
                nb_tbs ? tst.target_size / nb_tbs : 0,
                tst.max_target_size);
    qemu_printf("TB avg host size %zu bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? tst.host_size / nb_tbs : 0,
                tst.target_size ? (double)tst.host_size / tst.target_size : 0);
    qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page,
                nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
    qemu_printf("direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n",
                tst.direct_jmp_count,
                nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
                tst.direct_jmp2_count,
                nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);

    qht_statistics_init(&tb_ctx.htable, &hst);
    print_qht_statistics(hst);
    qht_statistics_destroy(&hst);

    qemu_printf("\nStatistics:\n");
    qemu_printf("TB flush count %u\n",
                qatomic_read(&tb_ctx.tb_flush_count));
    qemu_printf("TB invalidate count %zu\n",
                tcg_tb_phys_invalidate_count());

    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
    qemu_printf("TLB full flushes %zu\n", flush_full);
    qemu_printf("TLB partial flushes %zu\n", flush_part);
    qemu_printf("TLB elided flushes %zu\n", flush_elide);
    tcg_dump_info();
}

void dump_opcount_info(void)
{
    tcg_dump_op_count();
}

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());
    cpu->interrupt_request |= mask;
    qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}

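/*
 * Note: walk_memory_regions() below performs the generic traversal of the
 * user-mode page table (l1_map), coalescing runs of pages with identical
 * protection into regions.  page_dump() uses it to print /proc-style
 * mappings, and the linux-user core-dump code walks it the same way to
 * enumerate guest VMAs.
 */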
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    target_ulong start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   target_ulong end, int new_prot)
{
    if (data->start != -1u) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1u);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i, l1_sz = v_l1_size;

    data.fn = fn;
    data.priv = priv;
    data.start = -1u;
    data.prot = 0;

    for (i = 0; i < l1_sz; i++) {
        target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
        int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
                   " "TARGET_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
                   length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

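/*
 * Note on the PAGE_WRITE / PAGE_WRITE_ORG pair used below: PAGE_WRITE_ORG
 * records that the guest mapping was originally writable.  Once a page
 * contains translated code, PAGE_WRITE is cleared and the host page is
 * mprotect()ed read-only so that guest stores fault; page_unprotect()
 * then invalidates the TBs on the page and restores PAGE_WRITE.
 */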
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;
    bool reset_target_data;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
    assert(end - 1 <= GUEST_ADDR_MAX);
    assert(start < end);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }
    reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
    flags &= ~PAGE_RESET;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0);
        }
        if (reset_target_data) {
            g_free(p->target_data);
            p->target_data = NULL;
            p->flags = flags;
        } else {
            /* Using mprotect on a page does not change MAP_ANON. */
            p->flags = (p->flags & PAGE_ANON) | flags;
        }
    }
}

void *page_get_target_data(target_ulong address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    return p ? p->target_data : NULL;
}

void *page_alloc_target_data(target_ulong address, size_t size)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    void *ret = NULL;

    if (p->flags & PAGE_VALID) {
        ret = p->target_data;
        if (!ret) {
            p->target_data = ret = g_malloc0(size);
        }
    }
    return ret;
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space. If this assert fires, it probably indicates
       a missing call to h2g_valid. */
    if (TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS) {
        assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    }

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}

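/*
 * Note: page_unprotect() below is reached from the host SIGSEGV handler
 * when a guest store hits a page that was made read-only because it
 * contains translated code, and from page_check_range() above when the
 * guest asks to write such a page directly.
 */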
/* called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    unsigned int prot;
    bool current_tb_invalidated;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if (p->flags & PAGE_WRITE_ORG) {
        current_tb_invalidated = false;
        if (p->flags & PAGE_WRITE) {
            /* If the page is actually marked WRITE then assume this is because
             * this thread raced with another one which got here first and
             * set the page to PAGE_WRITE and did the TB invalidate for us.
             */
#ifdef TARGET_HAS_PRECISE_SMC
            TranslationBlock *current_tb = tcg_tb_lookup(pc);
            if (current_tb) {
                current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
            }
#endif
        } else {
            host_start = address & qemu_host_page_mask;
            host_end = host_start + qemu_host_page_size;

            prot = 0;
            for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
                p = page_find(addr >> TARGET_PAGE_BITS);
                p->flags |= PAGE_WRITE;
                prot |= p->flags;

                /* and since the content will be modified, we must invalidate
                   the corresponding translated code. */
                current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef CONFIG_USER_ONLY
                if (DEBUG_TB_CHECK_GATE) {
                    tb_invalidate_check(addr);
                }
#endif
            }
            mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
                     prot & PAGE_BITS);
        }
        mmap_unlock();
        /* If current TB was invalidated return to main loop */
        return current_tb_invalidated ? 2 : 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* CONFIG_USER_ONLY */

/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
void tcg_flush_softmmu_tlb(CPUState *cs)
{
#ifdef CONFIG_SOFTMMU
    tlb_flush(cs);
#endif
}