/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "hw/core/tcg-cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "mte_helper.h"


static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}

uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
                                  uint64_t ptr, MMUAccessType ptr_access,
                                  int ptr_size, MMUAccessType tag_access,
                                  bool probe, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    assert(!(probe && ra));

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        if (probe) {
            return NULL;
        }
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is either a pure probe or a no-fault-expected probe.
     * Indicate to probe_access_flags no-fault, then either return NULL
     * for the pure probe, or assert that we received a valid page for the
     * no-fault-expected probe.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    if (probe && (flags & TLB_INVALID_MASK)) {
        return NULL;
    }
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->extra.arm.pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (!probe && unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }

    /* Convert to the physical address in tag space. */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}

static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   uintptr_t ra)
{
    return allocation_tag_mem_probe(env, ptr_mmu_idx, ptr, ptr_access,
                                    ptr_size, tag_access, false, ra);
}

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = arm_env_mmu_index(env);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    arm_env_mmu_index(env), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble. */
void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble. */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = arm_env_mmu_index(env);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page. */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = arm_env_mmu_index(env);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    uint64_t ret;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /* The tag is squashed to zero if the page does not support tags. */
    if (!tag_mem) {
        return 0;
    }

    /*
     * The ordering of elements within the word corresponds to
     * a little-endian operation.  Computation of shift comes from
     *
     *     index = address<LOG2_TAG_GRANULE+3:LOG2_TAG_GRANULE>
     *     data<index*4+3:index*4> = tag
     *
     * Because of the alignment of ptr above, BS=6 has shift=0.
     * All memory operations are aligned.  Defer support for BS=2,
     * requiring insertion or extraction of a nibble, until we
     * support a cpu that requires it.
     */
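    /*
     * Worked example: with BS=5 (128-byte blocks), ptr was aligned to
     * 128 bytes above, so extract64(ptr, LOG2_TAG_GRANULE, 4) is either
     * 0 or 8 depending on bit 7 of the address, and the 32 tag bits
     * land in either the low or the high half of the result.
     */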
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        ret = *(uint8_t *)tag_mem;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        ret = cpu_to_le16(*(uint16_t *)tag_mem);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        ret = cpu_to_le32(*(uint32_t *)tag_mem);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        return cpu_to_le64(*(uint64_t *)tag_mem);
    default:
        /*
         * CPU configured with unsupported/invalid gm blocksize.
         * This is detected early in arm_cpu_realizefn.
         */
        g_assert_not_reached();
    }
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    return ret << shift;
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page. */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    /* See LDGM for comments on BS and on shift. */
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    val >>= shift;
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        *(uint8_t *)tag_mem = val;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        *(uint16_t *)tag_mem = cpu_to_le16(val);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        *(uint32_t *)tag_mem = cpu_to_le32(val);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        *(uint64_t *)tag_mem = cpu_to_le64(val);
        break;
    default:
        /* cpu configured with unsupported gm blocksize. */
        g_assert_not_reached();
    }
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = arm_env_mmu_index(env);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
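    /*
     * For example, a typical 64-byte DC ZVA block (dcz_blocksize = 4)
     * gives log2_dcz_bytes = 6 and tag_bytes = 2: each tag byte covers
     * two 16-byte granules, so the block is described by two whole
     * tag bytes.
     */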
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}

static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal".  But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure. */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set. */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}

/**
 * checkN:
 * @mem: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare. */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}

/**
 * checkNrev:
 * @mem: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * This is like checkN, but it runs backwards, checking the
 * tags starting with @mem and then the tags preceding it.
 * This is needed by the backwards-memory-copying operations.
 */
static int checkNrev(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare. */
    cmp *= 0x11;
    diff = *mem-- ^ cmp;

    if (!odd) {
        goto start_even;
    }

    while (1) {
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_even:
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem-- ^ cmp;
    }
    return n;
}

/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint32_t sizem1, tag_count, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
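    /*
     * The TBI and TCMA settings consulted below were folded into @desc
     * at translate time; bit 55 selects which of the two VA ranges
     * (and thus which TBI/TCMA bit) applies, since the top byte of the
     * pointer may hold the allocation tag.
     */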
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, ra);

        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}

uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    /*
     * R_XCHFJ: Alignment check not caused by memory type is priority 1,
     * higher than any translation fault.  When MTE is disabled, tcg
     * performs the alignment check during the code generated for the
     * memory access.  With MTE enabled, we must check this here before
     * raising any translation fault in allocation_tag_mem.
     */
    unsigned align = FIELD_EX32(desc, MTEDESC, ALIGN);
    if (unlikely(align)) {
        align = (1u << align) - 1;
        if (unlikely(ptr & align)) {
            int idx = FIELD_EX32(desc, MTEDESC, MIDX);
            bool w = FIELD_EX32(desc, MTEDESC, WRITE);
            MMUAccessType type = w ? MMU_DATA_STORE : MMU_DATA_LOAD;
            arm_cpu_do_unaligned_access(env_cpu(env), ptr, type, idx, GETPC());
        }
    }

    return mte_check(env, desc, ptr, GETPC());
}

/*
 * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}

uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag, tag_first, tag_last;
    void *mem;
    bool w = FIELD_EX32(desc, MTEDESC, WRITE);
    uint32_t n;

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe; this will never fault */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                   w ? MMU_DATA_STORE : MMU_DATA_LOAD,
                                   size, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        return size;
    }

    /*
     * TODO: checkN() is not designed for checks of the size we expect
     * for FEAT_MOPS operations, so we should implement this differently.
     * Maybe we should do something like
     *   if (region start and size are aligned nicely) {
     *      do direct loads of 64 tag bits at a time;
     *   } else {
     *      call checkN()
     *   }
     */
    /* Round the bounds to the tag granule, and compute the number of tags. */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr + size - 1, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
    n = checkN(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
    if (likely(n == tag_count)) {
        return size;
    }

    /*
     * Failure; for the first granule, it's at @ptr.  Otherwise
     * it's at the first byte of the nth granule.  Calculate how
     * many bytes we can access without hitting that failure.
     */
    if (n == 0) {
        return 0;
    } else {
        return n * TAG_GRANULE - (ptr - tag_first);
    }
}

uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag, tag_first, tag_last;
    void *mem;
    bool w = FIELD_EX32(desc, MTEDESC, WRITE);
    uint32_t n;

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /*
     * True probe; this will never fault.  Note that our caller passes
     * us a pointer to the end of the region, but allocation_tag_mem_probe()
     * wants a pointer to the start.  Because we know we don't span a page
     * boundary and that allocation_tag_mem_probe() doesn't otherwise care
     * about the size, pass in a size of 1 byte.  This is simpler than
     * adjusting the ptr to point to the start of the region and then having
     * to adjust the returned 'mem' to get the end of the tag memory.
     */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                   w ? MMU_DATA_STORE : MMU_DATA_LOAD,
                                   1, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        return size;
    }

    /*
     * TODO: checkNrev() is not designed for checks of the size we expect
     * for FEAT_MOPS operations, so we should implement this differently.
     * Maybe we should do something like
     *   if (region start and size are aligned nicely) {
     *      do direct loads of 64 tag bits at a time;
     *   } else {
     *      call checkNrev()
     *   }
     */
    /* Round the bounds to the tag granule, and compute the number of tags. */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_first = QEMU_ALIGN_DOWN(ptr - (size - 1), TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
    n = checkNrev(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
    if (likely(n == tag_count)) {
        return size;
    }

    /*
     * Failure; for the first granule, it's at @ptr.  Otherwise
     * it's at the last byte of the nth granule.  Calculate how
     * many bytes we can access without hitting that failure.
     */
    if (n == 0) {
        return 0;
    } else {
        return (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last);
    }
}

void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size,
                       uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag;
    void *mem;

    if (!desc) {
        /* Tags not actually enabled */
        return;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe: this will never fault */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE, size,
                                   MMU_DATA_STORE, true, 0);
    if (!mem) {
        return;
    }

    /*
     * We know that ptr and size are both TAG_GRANULE aligned; store
     * the tag from the pointer value into the tag memory.
     */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_count = size / TAG_GRANULE;
    if (ptr & TAG_GRANULE) {
        /* Not 2*TAG_GRANULE-aligned: store tag to first nibble */
        store_tag1_parallel(TAG_GRANULE, mem, ptr_tag);
        mem++;
        tag_count--;
    }
    memset(mem, ptr_tag | (ptr_tag << 4), tag_count / 2);
    if (tag_count & 1) {
        /* Final trailing unaligned nibble */
        mem += tag_count / 2;
        store_tag1_parallel(0, mem, ptr_tag);
    }
}