/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#endif

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
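/*
 * Worked example of the GDB register numbering implied above, assuming a
 * VFP3 core with NEON (nregs == 32): regs 0..31 map to D0..D31, regs
 * 32..47 alias Q0..Q15 (16 bytes each), and the three trailing registers
 * are FPSID, FPSCR and FPEXC. Without VFP3 (nregs == 16) the trailing
 * registers instead follow immediately after D15.
 */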
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
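/*
 * Taken together, the two helpers above implement the sync protocol used
 * for migration and KVM state transfer: write_cpustate_to_list() marshals
 * cpu->env into the (index, value) list on the outgoing side, and
 * write_list_to_cpustate() replays that list through the raw writers on
 * the incoming side, using read-back to detect registers that refused
 * the incoming value.
 */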
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    /* HCR_EL2.FB only applies at non-secure EL1, as HCR_EL2 has no
     * effect in Secure state; hence the negated secure check here.
     */
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = env_archcpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiall_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = env_archcpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimva_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = env_archcpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiasid_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = env_archcpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimvaa_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
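/*
 * Worked example of the two helpers above: with PMCR.N == 4 (four event
 * counters), pmu_num_counters() extracts 4 from bits [15:11], and
 * pmu_counter_mask() yields (1 << 31) | 0xf == 0x8000000f: bit 31 covers
 * the cycle counter and bits [3:0] the implemented event counters.
 */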
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    }
#endif
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x11
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
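/*
 * Example of the PMCEID placement performed by pmu_init() above: event
 * 0x011 (CPU_CYCLES) has bit 5 (0x20) clear, so it sets bit
 * (0x011 & 0x1f) == 17 of PMCEID0; an event in the 0x20..0x3f range would
 * set the corresponding bit of PMCEID1 instead.
 */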
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
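/*
 * Summary of the PMUSERENR bits consulted by the accessors above: EN
 * (bit 0, checked in pmreg_access), SW (bit 1, software increment
 * writes), CR (bit 2, cycle counter reads) and ER (bit 3, event counter
 * and PMSELR accesses); each bit opens a narrow EL0 path that would
 * otherwise fall through to the generic trap checks.
 */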
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        /* Counting is prohibited in Secure state unless MDCR_EL3.SPME is set */
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
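/*
 * Example of the filter evaluation above: at non-secure EL0 with
 * PMXEVTYPER.U set and NSU clear, "u != nsu" is true so the event is
 * filtered (not counted); setting NSU as well makes u == nsu, and
 * counting at non-secure EL0 resumes.
 */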
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
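/*
 * Numeric sketch of the delta scheme used by the two functions above
 * (assuming the counter is enabled and PMCR.D is clear): if the underlying
 * clock reads 1000 cycles and c15_ccnt_delta is 400, pmccntr_op_start()
 * exposes a guest PMCCNTR of 600; if the guest then writes 100,
 * pmccntr_op_finish() stores a new delta of 1000 - 100 = 900, so the
 * counter resumes counting from the written value.
 */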
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; meanwhile,
     * we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}
static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}
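/*
 * The counter-number decode used here and in the PMEVCNTR accessors below
 * follows the v8 PMU register encoding, where PMEVTYPER<n> sits at
 * crm = 12 + (n >> 3), opc2 = n & 7. For example crm = 13, opc2 = 2
 * decodes as ((13 & 3) << 3) | 2 == 10, i.e. PMEVTYPER10.
 */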
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE.
         */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Only the bits for implemented counters (and the C bit) can be set */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
1896 */ 1897 if (arm_feature(env, ARM_FEATURE_V7) && 1898 !arm_feature(env, ARM_FEATURE_V8)) { 1899 valid_mask &= ~SCR_SMD; 1900 } 1901 } 1902 if (cpu_isar_feature(aa64_lor, cpu)) { 1903 valid_mask |= SCR_TLOR; 1904 } 1905 if (cpu_isar_feature(aa64_pauth, cpu)) { 1906 valid_mask |= SCR_API | SCR_APK; 1907 } 1908 1909 /* Clear all-context RES0 bits. */ 1910 value &= valid_mask; 1911 raw_write(env, ri, value); 1912 } 1913 1914 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1915 { 1916 ARMCPU *cpu = env_archcpu(env); 1917 1918 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR 1919 * access. 1920 */ 1921 uint32_t index = A32_BANKED_REG_GET(env, csselr, 1922 ri->secure & ARM_CP_SECSTATE_S); 1923 1924 return cpu->ccsidr[index]; 1925 } 1926 1927 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1928 uint64_t value) 1929 { 1930 raw_write(env, ri, value & 0xf); 1931 } 1932 1933 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1934 { 1935 CPUState *cs = env_cpu(env); 1936 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 1937 uint64_t ret = 0; 1938 1939 if (hcr_el2 & HCR_IMO) { 1940 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { 1941 ret |= CPSR_I; 1942 } 1943 } else { 1944 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { 1945 ret |= CPSR_I; 1946 } 1947 } 1948 1949 if (hcr_el2 & HCR_FMO) { 1950 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { 1951 ret |= CPSR_F; 1952 } 1953 } else { 1954 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { 1955 ret |= CPSR_F; 1956 } 1957 } 1958 1959 /* External aborts are not possible in QEMU so A bit is always clear */ 1960 return ret; 1961 } 1962 1963 static const ARMCPRegInfo v7_cp_reginfo[] = { 1964 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ 1965 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 1966 .access = PL1_W, .type = ARM_CP_NOP }, 1967 /* Performance monitors are implementation defined in v7, 1968 * but with an ARM recommended set of registers, which we 1969 * follow. 1970 * 1971 * Performance registers fall into three categories: 1972 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) 1973 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) 1974 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) 1975 * For the cases controlled by PMUSERENR we must set .access to PL0_RW 1976 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
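 *
 * As a concrete reading of the list below: PMCNTENSET is category (c)
 * (.access = PL0_RW with .accessfn = pmreg_access), PMUSERENR is
 * category (b) (.access = PL0_R | PL1_RW), and PMINTENSET is category
 * (a) (.access = PL1_RW).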
1977 */ 1978 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, 1979 .access = PL0_RW, .type = ARM_CP_ALIAS, 1980 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 1981 .writefn = pmcntenset_write, 1982 .accessfn = pmreg_access, 1983 .raw_writefn = raw_write }, 1984 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, 1985 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, 1986 .access = PL0_RW, .accessfn = pmreg_access, 1987 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, 1988 .writefn = pmcntenset_write, .raw_writefn = raw_write }, 1989 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, 1990 .access = PL0_RW, 1991 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 1992 .accessfn = pmreg_access, 1993 .writefn = pmcntenclr_write, 1994 .type = ARM_CP_ALIAS }, 1995 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, 1996 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, 1997 .access = PL0_RW, .accessfn = pmreg_access, 1998 .type = ARM_CP_ALIAS, 1999 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), 2000 .writefn = pmcntenclr_write }, 2001 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, 2002 .access = PL0_RW, .type = ARM_CP_IO, 2003 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2004 .accessfn = pmreg_access, 2005 .writefn = pmovsr_write, 2006 .raw_writefn = raw_write }, 2007 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, 2008 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, 2009 .access = PL0_RW, .accessfn = pmreg_access, 2010 .type = ARM_CP_ALIAS | ARM_CP_IO, 2011 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2012 .writefn = pmovsr_write, 2013 .raw_writefn = raw_write }, 2014 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, 2015 .access = PL0_W, .accessfn = pmreg_access_swinc, 2016 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2017 .writefn = pmswinc_write }, 2018 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, 2019 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, 2020 .access = PL0_W, .accessfn = pmreg_access_swinc, 2021 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2022 .writefn = pmswinc_write }, 2023 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, 2024 .access = PL0_RW, .type = ARM_CP_ALIAS, 2025 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), 2026 .accessfn = pmreg_access_selr, .writefn = pmselr_write, 2027 .raw_writefn = raw_write}, 2028 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, 2029 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, 2030 .access = PL0_RW, .accessfn = pmreg_access_selr, 2031 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), 2032 .writefn = pmselr_write, .raw_writefn = raw_write, }, 2033 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, 2034 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, 2035 .readfn = pmccntr_read, .writefn = pmccntr_write32, 2036 .accessfn = pmreg_access_ccntr }, 2037 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, 2038 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, 2039 .access = PL0_RW, .accessfn = pmreg_access_ccntr, 2040 .type = ARM_CP_IO, 2041 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), 2042 .readfn = pmccntr_read, .writefn = pmccntr_write, 2043 .raw_readfn = raw_read, .raw_writefn = raw_write, }, 2044 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, 2045 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, 
2046 .access = PL0_RW, .accessfn = pmreg_access, 2047 .type = ARM_CP_ALIAS | ARM_CP_IO, 2048 .resetvalue = 0, }, 2049 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, 2050 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, 2051 .writefn = pmccfiltr_write, .raw_writefn = raw_write, 2052 .access = PL0_RW, .accessfn = pmreg_access, 2053 .type = ARM_CP_IO, 2054 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), 2055 .resetvalue = 0, }, 2056 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, 2057 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2058 .accessfn = pmreg_access, 2059 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2060 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, 2061 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, 2062 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2063 .accessfn = pmreg_access, 2064 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2065 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, 2066 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2067 .accessfn = pmreg_access_xevcntr, 2068 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2069 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, 2070 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, 2071 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2072 .accessfn = pmreg_access_xevcntr, 2073 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2074 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, 2075 .access = PL0_R | PL1_RW, .accessfn = access_tpm, 2076 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), 2077 .resetvalue = 0, 2078 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2079 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, 2080 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, 2081 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 2082 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), 2083 .resetvalue = 0, 2084 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2085 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, 2086 .access = PL1_RW, .accessfn = access_tpm, 2087 .type = ARM_CP_ALIAS | ARM_CP_IO, 2088 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), 2089 .resetvalue = 0, 2090 .writefn = pmintenset_write, .raw_writefn = raw_write }, 2091 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, 2092 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, 2093 .access = PL1_RW, .accessfn = access_tpm, 2094 .type = ARM_CP_IO, 2095 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2096 .writefn = pmintenset_write, .raw_writefn = raw_write, 2097 .resetvalue = 0x0 }, 2098 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, 2099 .access = PL1_RW, .accessfn = access_tpm, 2100 .type = ARM_CP_ALIAS | ARM_CP_IO, 2101 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2102 .writefn = pmintenclr_write, }, 2103 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, 2104 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, 2105 .access = PL1_RW, .accessfn = access_tpm, 2106 .type = ARM_CP_ALIAS | ARM_CP_IO, 2107 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2108 .writefn = pmintenclr_write }, 2109 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, 2110 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, 2111 .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, 2112 { .name = "CSSELR", .state = 
ARM_CP_STATE_BOTH, 2113 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, 2114 .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0, 2115 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), 2116 offsetof(CPUARMState, cp15.csselr_ns) } }, 2117 /* Auxiliary ID register: this actually has an IMPDEF value but for now 2118 * just RAZ for all cores: 2119 */ 2120 { .name = "AIDR", .state = ARM_CP_STATE_BOTH, 2121 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, 2122 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 2123 /* Auxiliary fault status registers: these also are IMPDEF, and we 2124 * choose to RAZ/WI for all cores. 2125 */ 2126 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, 2127 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, 2128 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 2129 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, 2130 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, 2131 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 2132 /* MAIR can just read-as-written because we don't implement caches 2133 * and so don't need to care about memory attributes. 2134 */ 2135 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, 2136 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2137 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), 2138 .resetvalue = 0 }, 2139 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, 2140 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, 2141 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), 2142 .resetvalue = 0 }, 2143 /* For non-long-descriptor page tables these are PRRR and NMRR; 2144 * regardless they still act as reads-as-written for QEMU. 2145 */ 2146 /* MAIR0/1 are defined separately from their 64-bit counterpart which 2147 * allows them to assign the correct fieldoffset based on the endianness 2148 * handled in the field definitions. 
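 * (The mair0/1 banked fields are declared in cpu.h so that they overlay
 * the architecturally correct 32-bit halves of the 64-bit mair_el[]
 * values regardless of host endianness.)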
2149 */ 2150 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, 2151 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW, 2152 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), 2153 offsetof(CPUARMState, cp15.mair0_ns) }, 2154 .resetfn = arm_cp_reset_ignore }, 2155 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, 2156 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW, 2157 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), 2158 offsetof(CPUARMState, cp15.mair1_ns) }, 2159 .resetfn = arm_cp_reset_ignore }, 2160 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, 2161 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, 2162 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, 2163 /* 32 bit ITLB invalidates */ 2164 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, 2165 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 2166 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, 2167 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 2168 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, 2169 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 2170 /* 32 bit DTLB invalidates */ 2171 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, 2172 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 2173 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, 2174 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 2175 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, 2176 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 2177 /* 32 bit TLB invalidates */ 2178 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 2179 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 2180 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 2181 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 2182 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 2183 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 2184 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 2185 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 2186 REGINFO_SENTINEL 2187 }; 2188 2189 static const ARMCPRegInfo v7mp_cp_reginfo[] = { 2190 /* 32 bit TLB invalidates, Inner Shareable */ 2191 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 2192 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write }, 2193 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 2194 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 2195 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 2196 .type = ARM_CP_NO_RAW, .access = PL1_W, 2197 .writefn = tlbiasid_is_write }, 2198 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 2199 .type = ARM_CP_NO_RAW, .access = PL1_W, 2200 .writefn = tlbimvaa_is_write }, 2201 REGINFO_SENTINEL 2202 }; 2203 2204 static const ARMCPRegInfo pmovsset_cp_reginfo[] = { 2205 /* PMOVSSET is not implemented in v7 before v7ve */ 2206 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, 2207 .access = PL0_RW, .accessfn = pmreg_access, 2208 .type = ARM_CP_ALIAS | ARM_CP_IO, 2209 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2210 .writefn = 
pmovsset_write, 2211 .raw_writefn = raw_write }, 2212 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, 2213 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, 2214 .access = PL0_RW, .accessfn = pmreg_access, 2215 .type = ARM_CP_ALIAS | ARM_CP_IO, 2216 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2217 .writefn = pmovsset_write, 2218 .raw_writefn = raw_write }, 2219 REGINFO_SENTINEL 2220 }; 2221 2222 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2223 uint64_t value) 2224 { 2225 value &= 1; 2226 env->teecr = value; 2227 } 2228 2229 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, 2230 bool isread) 2231 { 2232 if (arm_current_el(env) == 0 && (env->teecr & 1)) { 2233 return CP_ACCESS_TRAP; 2234 } 2235 return CP_ACCESS_OK; 2236 } 2237 2238 static const ARMCPRegInfo t2ee_cp_reginfo[] = { 2239 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, 2240 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), 2241 .resetvalue = 0, 2242 .writefn = teecr_write }, 2243 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, 2244 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), 2245 .accessfn = teehbr_access, .resetvalue = 0 }, 2246 REGINFO_SENTINEL 2247 }; 2248 2249 static const ARMCPRegInfo v6k_cp_reginfo[] = { 2250 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, 2251 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, 2252 .access = PL0_RW, 2253 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, 2254 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, 2255 .access = PL0_RW, 2256 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), 2257 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, 2258 .resetfn = arm_cp_reset_ignore }, 2259 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, 2260 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, 2261 .access = PL0_R|PL1_W, 2262 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), 2263 .resetvalue = 0}, 2264 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, 2265 .access = PL0_R|PL1_W, 2266 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), 2267 offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, 2268 .resetfn = arm_cp_reset_ignore }, 2269 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, 2270 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, 2271 .access = PL1_RW, 2272 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, 2273 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, 2274 .access = PL1_RW, 2275 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), 2276 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, 2277 .resetvalue = 0 }, 2278 REGINFO_SENTINEL 2279 }; 2280 2281 #ifndef CONFIG_USER_ONLY 2282 2283 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, 2284 bool isread) 2285 { 2286 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. 2287 * Writable only at the highest implemented exception level. 2288 */ 2289 int el = arm_current_el(env); 2290 2291 switch (el) { 2292 case 0: 2293 if (!extract32(env->cp15.c14_cntkctl, 0, 2)) { 2294 return CP_ACCESS_TRAP; 2295 } 2296 break; 2297 case 1: 2298 if (!isread && ri->state == ARM_CP_STATE_AA32 && 2299 arm_is_secure_below_el3(env)) { 2300 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) 
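 * That is, we return CP_ACCESS_TRAP_UNCATEGORIZED below, which raises
 * an UNDEF at the current exception level rather than routing the
 * access to EL3.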
*/ 2301 return CP_ACCESS_TRAP_UNCATEGORIZED; 2302 } 2303 break; 2304 case 2: 2305 case 3: 2306 break; 2307 } 2308 2309 if (!isread && el < arm_highest_el(env)) { 2310 return CP_ACCESS_TRAP_UNCATEGORIZED; 2311 } 2312 2313 return CP_ACCESS_OK; 2314 } 2315 2316 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, 2317 bool isread) 2318 { 2319 unsigned int cur_el = arm_current_el(env); 2320 bool secure = arm_is_secure(env); 2321 2322 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ 2323 if (cur_el == 0 && 2324 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) { 2325 return CP_ACCESS_TRAP; 2326 } 2327 2328 if (arm_feature(env, ARM_FEATURE_EL2) && 2329 timeridx == GTIMER_PHYS && !secure && cur_el < 2 && 2330 !extract32(env->cp15.cnthctl_el2, 0, 1)) { 2331 return CP_ACCESS_TRAP_EL2; 2332 } 2333 return CP_ACCESS_OK; 2334 } 2335 2336 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, 2337 bool isread) 2338 { 2339 unsigned int cur_el = arm_current_el(env); 2340 bool secure = arm_is_secure(env); 2341 2342 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if 2343 * EL0[PV]TEN is zero. 2344 */ 2345 if (cur_el == 0 && 2346 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { 2347 return CP_ACCESS_TRAP; 2348 } 2349 2350 if (arm_feature(env, ARM_FEATURE_EL2) && 2351 timeridx == GTIMER_PHYS && !secure && cur_el < 2 && 2352 !extract32(env->cp15.cnthctl_el2, 1, 1)) { 2353 return CP_ACCESS_TRAP_EL2; 2354 } 2355 return CP_ACCESS_OK; 2356 } 2357 2358 static CPAccessResult gt_pct_access(CPUARMState *env, 2359 const ARMCPRegInfo *ri, 2360 bool isread) 2361 { 2362 return gt_counter_access(env, GTIMER_PHYS, isread); 2363 } 2364 2365 static CPAccessResult gt_vct_access(CPUARMState *env, 2366 const ARMCPRegInfo *ri, 2367 bool isread) 2368 { 2369 return gt_counter_access(env, GTIMER_VIRT, isread); 2370 } 2371 2372 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2373 bool isread) 2374 { 2375 return gt_timer_access(env, GTIMER_PHYS, isread); 2376 } 2377 2378 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2379 bool isread) 2380 { 2381 return gt_timer_access(env, GTIMER_VIRT, isread); 2382 } 2383 2384 static CPAccessResult gt_stimer_access(CPUARMState *env, 2385 const ARMCPRegInfo *ri, 2386 bool isread) 2387 { 2388 /* The AArch64 register view of the secure physical timer is 2389 * always accessible from EL3, and configurably accessible from 2390 * Secure EL1. 2391 */ 2392 switch (arm_current_el(env)) { 2393 case 1: 2394 if (!arm_is_secure(env)) { 2395 return CP_ACCESS_TRAP; 2396 } 2397 if (!(env->cp15.scr_el3 & SCR_ST)) { 2398 return CP_ACCESS_TRAP_EL3; 2399 } 2400 return CP_ACCESS_OK; 2401 case 0: 2402 case 2: 2403 return CP_ACCESS_TRAP; 2404 case 3: 2405 return CP_ACCESS_OK; 2406 default: 2407 g_assert_not_reached(); 2408 } 2409 } 2410 2411 static uint64_t gt_get_countervalue(CPUARMState *env) 2412 { 2413 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE; 2414 } 2415 2416 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 2417 { 2418 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 2419 2420 if (gt->ctl & 1) { 2421 /* Timer enabled: calculate and set current ISTATUS, irq, and 2422 * reset timer to when ISTATUS next has to change 2423 */ 2424 uint64_t offset = timeridx == GTIMER_VIRT ?
2425 cpu->env.cp15.cntvoff_el2 : 0; 2426 uint64_t count = gt_get_countervalue(&cpu->env); 2427 /* Note that this must be unsigned 64 bit arithmetic: */ 2428 int istatus = count - offset >= gt->cval; 2429 uint64_t nexttick; 2430 int irqstate; 2431 2432 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 2433 2434 irqstate = (istatus && !(gt->ctl & 2)); 2435 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2436 2437 if (istatus) { 2438 /* Next transition is when count rolls back over to zero */ 2439 nexttick = UINT64_MAX; 2440 } else { 2441 /* Next transition is when we hit cval */ 2442 nexttick = gt->cval + offset; 2443 } 2444 /* Note that the desired next expiry time might be beyond the 2445 * signed-64-bit range of a QEMUTimer -- in this case we just 2446 * set the timer for as far in the future as possible. When the 2447 * timer expires we will reset the timer for any remaining period. 2448 */ 2449 if (nexttick > INT64_MAX / GTIMER_SCALE) { 2450 nexttick = INT64_MAX / GTIMER_SCALE; 2451 } 2452 timer_mod(cpu->gt_timer[timeridx], nexttick); 2453 trace_arm_gt_recalc(timeridx, irqstate, nexttick); 2454 } else { 2455 /* Timer disabled: ISTATUS and timer output always clear */ 2456 gt->ctl &= ~4; 2457 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); 2458 timer_del(cpu->gt_timer[timeridx]); 2459 trace_arm_gt_recalc_disabled(timeridx); 2460 } 2461 } 2462 2463 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 2464 int timeridx) 2465 { 2466 ARMCPU *cpu = env_archcpu(env); 2467 2468 timer_del(cpu->gt_timer[timeridx]); 2469 } 2470 2471 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2472 { 2473 return gt_get_countervalue(env); 2474 } 2475 2476 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2477 { 2478 return gt_get_countervalue(env) - env->cp15.cntvoff_el2; 2479 } 2480 2481 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2482 int timeridx, 2483 uint64_t value) 2484 { 2485 trace_arm_gt_cval_write(timeridx, value); 2486 env->cp15.c14_timer[timeridx].cval = value; 2487 gt_recalc_timer(env_archcpu(env), timeridx); 2488 } 2489 2490 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 2491 int timeridx) 2492 { 2493 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0; 2494 2495 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 2496 (gt_get_countervalue(env) - offset)); 2497 } 2498 2499 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2500 int timeridx, 2501 uint64_t value) 2502 { 2503 uint64_t offset = timeridx == GTIMER_VIRT ? 
env->cp15.cntvoff_el2 : 0; 2504 2505 trace_arm_gt_tval_write(timeridx, value); 2506 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 2507 sextract64(value, 0, 32); 2508 gt_recalc_timer(env_archcpu(env), timeridx); 2509 } 2510 2511 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2512 int timeridx, 2513 uint64_t value) 2514 { 2515 ARMCPU *cpu = env_archcpu(env); 2516 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 2517 2518 trace_arm_gt_ctl_write(timeridx, value); 2519 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 2520 if ((oldval ^ value) & 1) { 2521 /* Enable toggled */ 2522 gt_recalc_timer(cpu, timeridx); 2523 } else if ((oldval ^ value) & 2) { 2524 /* IMASK toggled: don't need to recalculate, 2525 * just set the interrupt line based on ISTATUS 2526 */ 2527 int irqstate = (oldval & 4) && !(value & 2); 2528 2529 trace_arm_gt_imask_toggle(timeridx, irqstate); 2530 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2531 } 2532 } 2533 2534 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2535 { 2536 gt_timer_reset(env, ri, GTIMER_PHYS); 2537 } 2538 2539 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2540 uint64_t value) 2541 { 2542 gt_cval_write(env, ri, GTIMER_PHYS, value); 2543 } 2544 2545 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2546 { 2547 return gt_tval_read(env, ri, GTIMER_PHYS); 2548 } 2549 2550 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2551 uint64_t value) 2552 { 2553 gt_tval_write(env, ri, GTIMER_PHYS, value); 2554 } 2555 2556 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2557 uint64_t value) 2558 { 2559 gt_ctl_write(env, ri, GTIMER_PHYS, value); 2560 } 2561 2562 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2563 { 2564 gt_timer_reset(env, ri, GTIMER_VIRT); 2565 } 2566 2567 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2568 uint64_t value) 2569 { 2570 gt_cval_write(env, ri, GTIMER_VIRT, value); 2571 } 2572 2573 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2574 { 2575 return gt_tval_read(env, ri, GTIMER_VIRT); 2576 } 2577 2578 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2579 uint64_t value) 2580 { 2581 gt_tval_write(env, ri, GTIMER_VIRT, value); 2582 } 2583 2584 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2585 uint64_t value) 2586 { 2587 gt_ctl_write(env, ri, GTIMER_VIRT, value); 2588 } 2589 2590 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 2591 uint64_t value) 2592 { 2593 ARMCPU *cpu = env_archcpu(env); 2594 2595 trace_arm_gt_cntvoff_write(value); 2596 raw_write(env, ri, value); 2597 gt_recalc_timer(cpu, GTIMER_VIRT); 2598 } 2599 2600 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2601 { 2602 gt_timer_reset(env, ri, GTIMER_HYP); 2603 } 2604 2605 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2606 uint64_t value) 2607 { 2608 gt_cval_write(env, ri, GTIMER_HYP, value); 2609 } 2610 2611 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2612 { 2613 return gt_tval_read(env, ri, GTIMER_HYP); 2614 } 2615 2616 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2617 uint64_t value) 2618 { 2619 gt_tval_write(env, ri, GTIMER_HYP, value); 2620 } 2621 2622 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo 
*ri, 2623 uint64_t value) 2624 { 2625 gt_ctl_write(env, ri, GTIMER_HYP, value); 2626 } 2627 2628 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2629 { 2630 gt_timer_reset(env, ri, GTIMER_SEC); 2631 } 2632 2633 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2634 uint64_t value) 2635 { 2636 gt_cval_write(env, ri, GTIMER_SEC, value); 2637 } 2638 2639 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2640 { 2641 return gt_tval_read(env, ri, GTIMER_SEC); 2642 } 2643 2644 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2645 uint64_t value) 2646 { 2647 gt_tval_write(env, ri, GTIMER_SEC, value); 2648 } 2649 2650 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2651 uint64_t value) 2652 { 2653 gt_ctl_write(env, ri, GTIMER_SEC, value); 2654 } 2655 2656 void arm_gt_ptimer_cb(void *opaque) 2657 { 2658 ARMCPU *cpu = opaque; 2659 2660 gt_recalc_timer(cpu, GTIMER_PHYS); 2661 } 2662 2663 void arm_gt_vtimer_cb(void *opaque) 2664 { 2665 ARMCPU *cpu = opaque; 2666 2667 gt_recalc_timer(cpu, GTIMER_VIRT); 2668 } 2669 2670 void arm_gt_htimer_cb(void *opaque) 2671 { 2672 ARMCPU *cpu = opaque; 2673 2674 gt_recalc_timer(cpu, GTIMER_HYP); 2675 } 2676 2677 void arm_gt_stimer_cb(void *opaque) 2678 { 2679 ARMCPU *cpu = opaque; 2680 2681 gt_recalc_timer(cpu, GTIMER_SEC); 2682 } 2683 2684 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 2685 /* Note that CNTFRQ is purely reads-as-written for the benefit 2686 * of software; writing it doesn't actually change the timer frequency. 2687 * Our reset value matches the fixed frequency we implement the timer at. 2688 */ 2689 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 2690 .type = ARM_CP_ALIAS, 2691 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2692 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 2693 }, 2694 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 2695 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 2696 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2697 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 2698 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE, 2699 }, 2700 /* overall control: mostly access permissions */ 2701 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 2702 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 2703 .access = PL1_RW, 2704 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 2705 .resetvalue = 0, 2706 }, 2707 /* per-timer control */ 2708 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2709 .secure = ARM_CP_SECSTATE_NS, 2710 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 2711 .accessfn = gt_ptimer_access, 2712 .fieldoffset = offsetoflow32(CPUARMState, 2713 cp15.c14_timer[GTIMER_PHYS].ctl), 2714 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 2715 }, 2716 { .name = "CNTP_CTL_S", 2717 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2718 .secure = ARM_CP_SECSTATE_S, 2719 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 2720 .accessfn = gt_ptimer_access, 2721 .fieldoffset = offsetoflow32(CPUARMState, 2722 cp15.c14_timer[GTIMER_SEC].ctl), 2723 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2724 }, 2725 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 2726 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 2727 .type = ARM_CP_IO, .access = PL0_RW, 2728 .accessfn = gt_ptimer_access, 2729 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 2730 .resetvalue 
= 0, 2731 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 2732 }, 2733 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 2734 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 2735 .accessfn = gt_vtimer_access, 2736 .fieldoffset = offsetoflow32(CPUARMState, 2737 cp15.c14_timer[GTIMER_VIRT].ctl), 2738 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 2739 }, 2740 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 2741 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 2742 .type = ARM_CP_IO, .access = PL0_RW, 2743 .accessfn = gt_vtimer_access, 2744 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 2745 .resetvalue = 0, 2746 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 2747 }, 2748 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 2749 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2750 .secure = ARM_CP_SECSTATE_NS, 2751 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 2752 .accessfn = gt_ptimer_access, 2753 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2754 }, 2755 { .name = "CNTP_TVAL_S", 2756 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2757 .secure = ARM_CP_SECSTATE_S, 2758 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 2759 .accessfn = gt_ptimer_access, 2760 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 2761 }, 2762 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2763 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 2764 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 2765 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 2766 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2767 }, 2768 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 2769 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 2770 .accessfn = gt_vtimer_access, 2771 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2772 }, 2773 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2774 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 2775 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 2776 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 2777 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2778 }, 2779 /* The counter itself */ 2780 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 2781 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2782 .accessfn = gt_pct_access, 2783 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 2784 }, 2785 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 2786 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 2787 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2788 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 2789 }, 2790 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 2791 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2792 .accessfn = gt_vct_access, 2793 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 2794 }, 2795 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 2796 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 2797 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2798 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 2799 }, 2800 /* Comparison value, indicating when the timer goes off */ 2801 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 2802 .secure = ARM_CP_SECSTATE_NS, 2803 .access = PL0_RW, 2804 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2805 .fieldoffset = offsetof(CPUARMState, 
cp15.c14_timer[GTIMER_PHYS].cval), 2806 .accessfn = gt_ptimer_access, 2807 .writefn = gt_phys_cval_write, .raw_writefn = raw_write, 2808 }, 2809 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, 2810 .secure = ARM_CP_SECSTATE_S, 2811 .access = PL0_RW, 2812 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2813 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 2814 .accessfn = gt_ptimer_access, 2815 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 2816 }, 2817 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 2818 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 2819 .access = PL0_RW, 2820 .type = ARM_CP_IO, 2821 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 2822 .resetvalue = 0, .accessfn = gt_ptimer_access, 2823 .writefn = gt_phys_cval_write, .raw_writefn = raw_write, 2824 }, 2825 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 2826 .access = PL0_RW, 2827 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2828 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 2829 .accessfn = gt_vtimer_access, 2830 .writefn = gt_virt_cval_write, .raw_writefn = raw_write, 2831 }, 2832 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 2833 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 2834 .access = PL0_RW, 2835 .type = ARM_CP_IO, 2836 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 2837 .resetvalue = 0, .accessfn = gt_vtimer_access, 2838 .writefn = gt_virt_cval_write, .raw_writefn = raw_write, 2839 }, 2840 /* Secure timer -- this is actually restricted to only EL3 2841 * and configurably Secure-EL1 via the accessfn. 2842 */ 2843 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 2844 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 2845 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 2846 .accessfn = gt_stimer_access, 2847 .readfn = gt_sec_tval_read, 2848 .writefn = gt_sec_tval_write, 2849 .resetfn = gt_sec_timer_reset, 2850 }, 2851 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 2852 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 2853 .type = ARM_CP_IO, .access = PL1_RW, 2854 .accessfn = gt_stimer_access, 2855 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 2856 .resetvalue = 0, 2857 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2858 }, 2859 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 2860 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 2861 .type = ARM_CP_IO, .access = PL1_RW, 2862 .accessfn = gt_stimer_access, 2863 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 2864 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 2865 }, 2866 REGINFO_SENTINEL 2867 }; 2868 2869 #else 2870 2871 /* In user-mode most of the generic timer registers are inaccessible; 2872 * however, modern kernels (4.12+) allow access to cntvct_el0 2873 */ 2874 2875 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2876 { 2877 /* Currently we have no support for QEMUTimer in linux-user so we 2878 * can't call gt_get_countervalue(env); instead we directly 2879 * call the lower level functions.
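 *
 * Worked example (assuming GTIMER_SCALE == 16, its value in
 * internals.h at the time of writing): cpu_get_clock() counts in
 * nanoseconds, so the value returned below ticks at 1e9 / 16 =
 * 62.5 MHz, consistent with the CNTFRQ_EL0 reset value of
 * NANOSECONDS_PER_SECOND / GTIMER_SCALE in the reginfo that follows.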
2880 */ 2881 return cpu_get_clock() / GTIMER_SCALE; 2882 } 2883 2884 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 2885 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 2886 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 2887 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, 2888 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 2889 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE, 2890 }, 2891 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 2892 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 2893 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2894 .readfn = gt_virt_cnt_read, 2895 }, 2896 REGINFO_SENTINEL 2897 }; 2898 2899 #endif 2900 2901 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2902 { 2903 if (arm_feature(env, ARM_FEATURE_LPAE)) { 2904 raw_write(env, ri, value); 2905 } else if (arm_feature(env, ARM_FEATURE_V7)) { 2906 raw_write(env, ri, value & 0xfffff6ff); 2907 } else { 2908 raw_write(env, ri, value & 0xfffff1ff); 2909 } 2910 } 2911 2912 #ifndef CONFIG_USER_ONLY 2913 /* get_phys_addr() isn't present for user-mode-only targets */ 2914 2915 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 2916 bool isread) 2917 { 2918 if (ri->opc2 & 4) { 2919 /* The ATS12NSO* operations must trap to EL3 if executed in 2920 * Secure EL1 (which can only happen if EL3 is AArch64). 2921 * They are simply UNDEF if executed from NS EL1. 2922 * They function normally from EL2 or EL3. 2923 */ 2924 if (arm_current_el(env) == 1) { 2925 if (arm_is_secure_below_el3(env)) { 2926 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 2927 } 2928 return CP_ACCESS_TRAP_UNCATEGORIZED; 2929 } 2930 } 2931 return CP_ACCESS_OK; 2932 } 2933 2934 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 2935 MMUAccessType access_type, ARMMMUIdx mmu_idx) 2936 { 2937 hwaddr phys_addr; 2938 target_ulong page_size; 2939 int prot; 2940 bool ret; 2941 uint64_t par64; 2942 bool format64 = false; 2943 MemTxAttrs attrs = {}; 2944 ARMMMUFaultInfo fi = {}; 2945 ARMCacheAttrs cacheattrs = {}; 2946 2947 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 2948 &prot, &page_size, &fi, &cacheattrs); 2949 2950 if (is_a64(env)) { 2951 format64 = true; 2952 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 2953 /* 2954 * ATS1Cxx: 2955 * * TTBCR.EAE determines whether the result is returned using the 2956 * 32-bit or the 64-bit PAR format 2957 * * Instructions executed in Hyp mode always use the 64bit format 2958 * 2959 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 2960 * * The Non-secure TTBCR.EAE bit is set to 1 2961 * * The implementation includes EL2, and the value of HCR.VM is 1 2962 * 2963 * (Note that HCR.DC makes HCR.VM behave as if it is 1.) 2964 * 2965 * ATS1Hx always uses the 64bit format. 
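 *
 * For example: ATS1CPR executed at NS EL1 produces the 32-bit PAR
 * format when TTBCR.EAE == 0 and the 64-bit format when
 * TTBCR.EAE == 1, while the same operation executed in Hyp mode
 * always produces the 64-bit format.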
2966 */ 2967 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 2968 2969 if (arm_feature(env, ARM_FEATURE_EL2)) { 2970 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 2971 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); 2972 } else { 2973 format64 |= arm_current_el(env) == 2; 2974 } 2975 } 2976 } 2977 2978 if (format64) { 2979 /* Create a 64-bit PAR */ 2980 par64 = (1 << 11); /* LPAE bit always set */ 2981 if (!ret) { 2982 par64 |= phys_addr & ~0xfffULL; 2983 if (!attrs.secure) { 2984 par64 |= (1 << 9); /* NS */ 2985 } 2986 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 2987 par64 |= cacheattrs.shareability << 7; /* SH */ 2988 } else { 2989 uint32_t fsr = arm_fi_to_lfsc(&fi); 2990 2991 par64 |= 1; /* F */ 2992 par64 |= (fsr & 0x3f) << 1; /* FS */ 2993 if (fi.stage2) { 2994 par64 |= (1 << 9); /* S */ 2995 } 2996 if (fi.s1ptw) { 2997 par64 |= (1 << 8); /* PTW */ 2998 } 2999 } 3000 } else { 3001 /* fsr is a DFSR/IFSR value for the short descriptor 3002 * translation table format (with WnR always clear). 3003 * Convert it to a 32-bit PAR. 3004 */ 3005 if (!ret) { 3006 /* We do not set any attribute bits in the PAR */ 3007 if (page_size == (1 << 24) 3008 && arm_feature(env, ARM_FEATURE_V7)) { 3009 par64 = (phys_addr & 0xff000000) | (1 << 1); 3010 } else { 3011 par64 = phys_addr & 0xfffff000; 3012 } 3013 if (!attrs.secure) { 3014 par64 |= (1 << 9); /* NS */ 3015 } 3016 } else { 3017 uint32_t fsr = arm_fi_to_sfsc(&fi); 3018 3019 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 3020 ((fsr & 0xf) << 1) | 1; 3021 } 3022 } 3023 return par64; 3024 } 3025 3026 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3027 { 3028 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3029 uint64_t par64; 3030 ARMMMUIdx mmu_idx; 3031 int el = arm_current_el(env); 3032 bool secure = arm_is_secure_below_el3(env); 3033 3034 switch (ri->opc2 & 6) { 3035 case 0: 3036 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */ 3037 switch (el) { 3038 case 3: 3039 mmu_idx = ARMMMUIdx_S1E3; 3040 break; 3041 case 2: 3042 mmu_idx = ARMMMUIdx_S1NSE1; 3043 break; 3044 case 1: 3045 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 3046 break; 3047 default: 3048 g_assert_not_reached(); 3049 } 3050 break; 3051 case 2: 3052 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 3053 switch (el) { 3054 case 3: 3055 mmu_idx = ARMMMUIdx_S1SE0; 3056 break; 3057 case 2: 3058 mmu_idx = ARMMMUIdx_S1NSE0; 3059 break; 3060 case 1: 3061 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 3062 break; 3063 default: 3064 g_assert_not_reached(); 3065 } 3066 break; 3067 case 4: 3068 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 3069 mmu_idx = ARMMMUIdx_S12NSE1; 3070 break; 3071 case 6: 3072 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 3073 mmu_idx = ARMMMUIdx_S12NSE0; 3074 break; 3075 default: 3076 g_assert_not_reached(); 3077 } 3078 3079 par64 = do_ats_write(env, value, access_type, mmu_idx); 3080 3081 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3082 } 3083 3084 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 3085 uint64_t value) 3086 { 3087 MMUAccessType access_type = ri->opc2 & 1 ? 
MMU_DATA_STORE : MMU_DATA_LOAD; 3088 uint64_t par64; 3089 3090 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2); 3091 3092 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3093 } 3094 3095 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 3096 bool isread) 3097 { 3098 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { 3099 return CP_ACCESS_TRAP; 3100 } 3101 return CP_ACCESS_OK; 3102 } 3103 3104 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 3105 uint64_t value) 3106 { 3107 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3108 ARMMMUIdx mmu_idx; 3109 int secure = arm_is_secure_below_el3(env); 3110 3111 switch (ri->opc2 & 6) { 3112 case 0: 3113 switch (ri->opc1) { 3114 case 0: /* AT S1E1R, AT S1E1W */ 3115 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 3116 break; 3117 case 4: /* AT S1E2R, AT S1E2W */ 3118 mmu_idx = ARMMMUIdx_S1E2; 3119 break; 3120 case 6: /* AT S1E3R, AT S1E3W */ 3121 mmu_idx = ARMMMUIdx_S1E3; 3122 break; 3123 default: 3124 g_assert_not_reached(); 3125 } 3126 break; 3127 case 2: /* AT S1E0R, AT S1E0W */ 3128 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 3129 break; 3130 case 4: /* AT S12E1R, AT S12E1W */ 3131 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1; 3132 break; 3133 case 6: /* AT S12E0R, AT S12E0W */ 3134 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0; 3135 break; 3136 default: 3137 g_assert_not_reached(); 3138 } 3139 3140 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 3141 } 3142 #endif 3143 3144 static const ARMCPRegInfo vapa_cp_reginfo[] = { 3145 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 3146 .access = PL1_RW, .resetvalue = 0, 3147 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 3148 offsetoflow32(CPUARMState, cp15.par_ns) }, 3149 .writefn = par_write }, 3150 #ifndef CONFIG_USER_ONLY 3151 /* This underdecoding is safe because the reginfo is NO_RAW. */ 3152 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 3153 .access = PL1_W, .accessfn = ats_access, 3154 .writefn = ats_write, .type = ARM_CP_NO_RAW }, 3155 #endif 3156 REGINFO_SENTINEL 3157 }; 3158 3159 /* Return basic MPU access permission bits. */ 3160 static uint32_t simple_mpu_ap_bits(uint32_t val) 3161 { 3162 uint32_t ret; 3163 uint32_t mask; 3164 int i; 3165 ret = 0; 3166 mask = 3; 3167 for (i = 0; i < 16; i += 2) { 3168 ret |= (val >> i) & mask; 3169 mask <<= 2; 3170 } 3171 return ret; 3172 } 3173 3174 /* Pad basic MPU access permission bits to extended format. 
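 *
 * Region n's 2-bit AP field moves from bits [2n+1:2n] of the simple
 * format to bits [4n+1:4n] of the extended format, with the
 * intervening bits zero. Worked example:
 * extended_mpu_ap_bits(0x000c) == 0x0030 (region 1, AP == 0b11), and
 * simple_mpu_ap_bits(0x0030) == 0x000c maps it back.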
*/ 3175 static uint32_t extended_mpu_ap_bits(uint32_t val) 3176 { 3177 uint32_t ret; 3178 uint32_t mask; 3179 int i; 3180 ret = 0; 3181 mask = 3; 3182 for (i = 0; i < 16; i += 2) { 3183 ret |= (val & mask) << i; 3184 mask <<= 2; 3185 } 3186 return ret; 3187 } 3188 3189 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3190 uint64_t value) 3191 { 3192 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 3193 } 3194 3195 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3196 { 3197 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 3198 } 3199 3200 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3201 uint64_t value) 3202 { 3203 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 3204 } 3205 3206 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3207 { 3208 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 3209 } 3210 3211 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 3212 { 3213 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3214 3215 if (!u32p) { 3216 return 0; 3217 } 3218 3219 u32p += env->pmsav7.rnr[M_REG_NS]; 3220 return *u32p; 3221 } 3222 3223 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 3224 uint64_t value) 3225 { 3226 ARMCPU *cpu = env_archcpu(env); 3227 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3228 3229 if (!u32p) { 3230 return; 3231 } 3232 3233 u32p += env->pmsav7.rnr[M_REG_NS]; 3234 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 3235 *u32p = value; 3236 } 3237 3238 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3239 uint64_t value) 3240 { 3241 ARMCPU *cpu = env_archcpu(env); 3242 uint32_t nrgs = cpu->pmsav7_dregion; 3243 3244 if (value >= nrgs) { 3245 qemu_log_mask(LOG_GUEST_ERROR, 3246 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 3247 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 3248 return; 3249 } 3250 3251 raw_write(env, ri, value); 3252 } 3253 3254 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 3255 /* Reset for all these registers is handled in arm_cpu_reset(), 3256 * because the PMSAv7 is also used by M-profile CPUs, which do 3257 * not register cpregs but still need the state to be reset. 
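 * (This is why every entry below uses .resetfn = arm_cp_reset_ignore.)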
3258 */ 3259 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 3260 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3261 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 3262 .readfn = pmsav7_read, .writefn = pmsav7_write, 3263 .resetfn = arm_cp_reset_ignore }, 3264 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 3265 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3266 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 3267 .readfn = pmsav7_read, .writefn = pmsav7_write, 3268 .resetfn = arm_cp_reset_ignore }, 3269 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 3270 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3271 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 3272 .readfn = pmsav7_read, .writefn = pmsav7_write, 3273 .resetfn = arm_cp_reset_ignore }, 3274 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 3275 .access = PL1_RW, 3276 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 3277 .writefn = pmsav7_rgnr_write, 3278 .resetfn = arm_cp_reset_ignore }, 3279 REGINFO_SENTINEL 3280 }; 3281 3282 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 3283 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3284 .access = PL1_RW, .type = ARM_CP_ALIAS, 3285 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3286 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 3287 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3288 .access = PL1_RW, .type = ARM_CP_ALIAS, 3289 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3290 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 3291 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 3292 .access = PL1_RW, 3293 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3294 .resetvalue = 0, }, 3295 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 3296 .access = PL1_RW, 3297 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3298 .resetvalue = 0, }, 3299 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 3300 .access = PL1_RW, 3301 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 3302 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 3303 .access = PL1_RW, 3304 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 3305 /* Protection region base and size registers */ 3306 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 3307 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3308 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 3309 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 3310 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3311 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 3312 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 3313 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3314 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 3315 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 3316 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3317 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 3318 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 3319 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3320 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 3321 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 3322 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3323 .fieldoffset 
= offsetof(CPUARMState, cp15.c6_region[5]) }, 3324 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, 3325 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3326 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, 3327 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, 3328 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3329 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, 3330 REGINFO_SENTINEL 3331 }; 3332 3333 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 3334 uint64_t value) 3335 { 3336 TCR *tcr = raw_ptr(env, ri); 3337 int maskshift = extract32(value, 0, 3); 3338 3339 if (!arm_feature(env, ARM_FEATURE_V8)) { 3340 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { 3341 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when 3342 * using Long-descriptor translation table format */ 3343 value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); 3344 } else if (arm_feature(env, ARM_FEATURE_EL3)) { 3345 /* In an implementation that includes the Security Extensions 3346 * TTBCR has additional fields PD0 [4] and PD1 [5] for 3347 * Short-descriptor translation table format. 3348 */ 3349 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; 3350 } else { 3351 value &= TTBCR_N; 3352 } 3353 } 3354 3355 /* Update the masks corresponding to the TCR bank being written. 3356 * Note that we always calculate mask and base_mask, but 3357 * they are only used for short-descriptor tables (ie if EAE is 0); 3358 * for long-descriptor tables the TCR fields are used differently 3359 * and the mask and base_mask values are meaningless. 3360 */ 3361 tcr->raw_tcr = value; 3362 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); 3363 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); 3364 } 3365 3366 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3367 uint64_t value) 3368 { 3369 ARMCPU *cpu = env_archcpu(env); 3370 TCR *tcr = raw_ptr(env, ri); 3371 3372 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3373 /* With LPAE the TTBCR could result in a change of ASID 3374 * via the TTBCR.A1 bit, so do a TLB flush. 3375 */ 3376 tlb_flush(CPU(cpu)); 3377 } 3378 /* Preserve the high half of TCR_EL1, set via TTBCR2. */ 3379 value = deposit64(tcr->raw_tcr, 0, 32, value); 3380 vmsa_ttbcr_raw_write(env, ri, value); 3381 } 3382 3383 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3384 { 3385 TCR *tcr = raw_ptr(env, ri); 3386 3387 /* Reset both the TCR as well as the masks corresponding to the bank of 3388 * the TCR being reset. 3389 */ 3390 tcr->raw_tcr = 0; 3391 tcr->mask = 0; 3392 tcr->base_mask = 0xffffc000u; 3393 } 3394 3395 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3396 uint64_t value) 3397 { 3398 ARMCPU *cpu = env_archcpu(env); 3399 TCR *tcr = raw_ptr(env, ri); 3400 3401 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ 3402 tlb_flush(CPU(cpu)); 3403 tcr->raw_tcr = value; 3404 } 3405 3406 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3407 uint64_t value) 3408 { 3409 /* If the ASID changes (with a 64-bit write), we must flush the TLB.
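 * The ASID field occupies TTBRn bits [63:48], which is exactly what
 * the extract64(..., 48, 16) comparison below checks.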
*/ 3410 if (cpreg_field_is_64bit(ri) && 3411 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { 3412 ARMCPU *cpu = env_archcpu(env); 3413 tlb_flush(CPU(cpu)); 3414 } 3415 raw_write(env, ri, value); 3416 } 3417 3418 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3419 uint64_t value) 3420 { 3421 ARMCPU *cpu = env_archcpu(env); 3422 CPUState *cs = CPU(cpu); 3423 3424 /* Accesses to VTTBR may change the VMID so we must flush the TLB. */ 3425 if (raw_read(env, ri) != value) { 3426 tlb_flush_by_mmuidx(cs, 3427 ARMMMUIdxBit_S12NSE1 | 3428 ARMMMUIdxBit_S12NSE0 | 3429 ARMMMUIdxBit_S2NS); 3430 raw_write(env, ri, value); 3431 } 3432 } 3433 3434 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 3435 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3436 .access = PL1_RW, .type = ARM_CP_ALIAS, 3437 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 3438 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 3439 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3440 .access = PL1_RW, .resetvalue = 0, 3441 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 3442 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 3443 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 3444 .access = PL1_RW, .resetvalue = 0, 3445 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 3446 offsetof(CPUARMState, cp15.dfar_ns) } }, 3447 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 3448 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 3449 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 3450 .resetvalue = 0, }, 3451 REGINFO_SENTINEL 3452 }; 3453 3454 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 3455 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 3456 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 3457 .access = PL1_RW, 3458 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 3459 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 3460 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 3461 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 3462 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 3463 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 3464 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 3465 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 3466 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 3467 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 3468 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 3469 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 3470 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 3471 .access = PL1_RW, .writefn = vmsa_tcr_el1_write, 3472 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 3473 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 3474 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 3475 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 3476 .raw_writefn = vmsa_ttbcr_raw_write, 3477 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), 3478 offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, 3479 REGINFO_SENTINEL 3480 }; 3481 3482 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing 3483 * qemu tlbs nor adjusting cached masks. 
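 * This is because TTBCR2 only updates the high half of TCR_EL1 (note
 * the offsetofhigh32() field offsets below), while the A1 bit and the
 * fields that feed mask/base_mask all live in the low half, which
 * only a TTBCR write can change.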
3484 */ 3485 static const ARMCPRegInfo ttbcr2_reginfo = { 3486 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, 3487 .access = PL1_RW, .type = ARM_CP_ALIAS, 3488 .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]), 3489 offsetofhigh32(CPUARMState, cp15.tcr_el[1]) }, 3490 }; 3491 3492 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 3493 uint64_t value) 3494 { 3495 env->cp15.c15_ticonfig = value & 0xe7; 3496 /* The OS_TYPE bit in this register changes the reported CPUID! */ 3497 env->cp15.c0_cpuid = (value & (1 << 5)) ? 3498 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 3499 } 3500 3501 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 3502 uint64_t value) 3503 { 3504 env->cp15.c15_threadid = value & 0xffff; 3505 } 3506 3507 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 3508 uint64_t value) 3509 { 3510 /* Wait-for-interrupt (deprecated) */ 3511 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); 3512 } 3513 3514 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 3515 uint64_t value) 3516 { 3517 /* On OMAP there are registers indicating the max/min index of dcache lines 3518 * containing a dirty line; cache flush operations have to reset these. 3519 */ 3520 env->cp15.c15_i_max = 0x000; 3521 env->cp15.c15_i_min = 0xff0; 3522 } 3523 3524 static const ARMCPRegInfo omap_cp_reginfo[] = { 3525 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 3526 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 3527 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 3528 .resetvalue = 0, }, 3529 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 3530 .access = PL1_RW, .type = ARM_CP_NOP }, 3531 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 3532 .access = PL1_RW, 3533 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 3534 .writefn = omap_ticonfig_write }, 3535 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 3536 .access = PL1_RW, 3537 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 3538 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 3539 .access = PL1_RW, .resetvalue = 0xff0, 3540 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 3541 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 3542 .access = PL1_RW, 3543 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 3544 .writefn = omap_threadid_write }, 3545 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 3546 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 3547 .type = ARM_CP_NO_RAW, 3548 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 3549 /* TODO: Peripheral port remap register: 3550 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 3551 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 3552 * when MMU is off. 
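 * As a worked example of that formula (value hypothetical): rn = 0x90000001 would give a base of 0x90000000 and a map size of 0x200 << 1 = 0x400 bytes.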
3553 */ 3554 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 3555 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 3556 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 3557 .writefn = omap_cachemaint_write }, 3558 { .name = "C9", .cp = 15, .crn = 9, 3559 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 3560 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 3561 REGINFO_SENTINEL 3562 }; 3563 3564 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 3565 uint64_t value) 3566 { 3567 env->cp15.c15_cpar = value & 0x3fff; 3568 } 3569 3570 static const ARMCPRegInfo xscale_cp_reginfo[] = { 3571 { .name = "XSCALE_CPAR", 3572 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 3573 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 3574 .writefn = xscale_cpar_write, }, 3575 { .name = "XSCALE_AUXCR", 3576 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 3577 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 3578 .resetvalue = 0, }, 3579 /* XScale specific cache-lockdown: since we have no cache we NOP these 3580 * and hope the guest does not really rely on cache behaviour. 3581 */ 3582 { .name = "XSCALE_LOCK_ICACHE_LINE", 3583 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 3584 .access = PL1_W, .type = ARM_CP_NOP }, 3585 { .name = "XSCALE_UNLOCK_ICACHE", 3586 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 3587 .access = PL1_W, .type = ARM_CP_NOP }, 3588 { .name = "XSCALE_DCACHE_LOCK", 3589 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 3590 .access = PL1_RW, .type = ARM_CP_NOP }, 3591 { .name = "XSCALE_UNLOCK_DCACHE", 3592 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 3593 .access = PL1_W, .type = ARM_CP_NOP }, 3594 REGINFO_SENTINEL 3595 }; 3596 3597 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 3598 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 3599 * implementation of this implementation-defined space. 3600 * Ideally this should eventually disappear in favour of actually 3601 * implementing the correct behaviour for all cores. 
3602 */ 3603 { .name = "C15_IMPDEF", .cp = 15, .crn = 15, 3604 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 3605 .access = PL1_RW, 3606 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, 3607 .resetvalue = 0 }, 3608 REGINFO_SENTINEL 3609 }; 3610 3611 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { 3612 /* Cache status: RAZ because we have no cache so it's always clean */ 3613 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, 3614 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 3615 .resetvalue = 0 }, 3616 REGINFO_SENTINEL 3617 }; 3618 3619 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { 3620 /* We never have a block transfer operation in progress */ 3621 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, 3622 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 3623 .resetvalue = 0 }, 3624 /* The cache ops themselves: these all NOP for QEMU */ 3625 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, 3626 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3627 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, 3628 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3629 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, 3630 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3631 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, 3632 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3633 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, 3634 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3635 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, 3636 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3637 REGINFO_SENTINEL 3638 }; 3639 3640 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { 3641 /* The cache test-and-clean instructions always return (1 << 30) 3642 * to indicate that there are no dirty cache lines. 3643 */ 3644 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, 3645 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 3646 .resetvalue = (1 << 30) }, 3647 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, 3648 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 3649 .resetvalue = (1 << 30) }, 3650 REGINFO_SENTINEL 3651 }; 3652 3653 static const ARMCPRegInfo strongarm_cp_reginfo[] = { 3654 /* Ignore ReadBuffer accesses */ 3655 { .name = "C9_READBUFFER", .cp = 15, .crn = 9, 3656 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 3657 .access = PL1_RW, .resetvalue = 0, 3658 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, 3659 REGINFO_SENTINEL 3660 }; 3661 3662 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3663 { 3664 ARMCPU *cpu = env_archcpu(env); 3665 unsigned int cur_el = arm_current_el(env); 3666 bool secure = arm_is_secure(env); 3667 3668 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 3669 return env->cp15.vpidr_el2; 3670 } 3671 return raw_read(env, ri); 3672 } 3673 3674 static uint64_t mpidr_read_val(CPUARMState *env) 3675 { 3676 ARMCPU *cpu = env_archcpu(env); 3677 uint64_t mpidr = cpu->mp_affinity; 3678 3679 if (arm_feature(env, ARM_FEATURE_V7MP)) { 3680 mpidr |= (1U << 31); 3681 /* Cores which are uniprocessor (non-coherent) 3682 * but still implement the MP extensions set 3683 * bit 30. (For instance, Cortex-R5).
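 * As an illustration: a core in an SMP v7MP cluster reads MPIDR as (1U << 31) | affinity, whereas a uniprocessor Cortex-R5 reads (1U << 31) | (1U << 30) | affinity.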
3684 */ 3685 if (cpu->mp_is_up) { 3686 mpidr |= (1u << 30); 3687 } 3688 } 3689 return mpidr; 3690 } 3691 3692 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3693 { 3694 unsigned int cur_el = arm_current_el(env); 3695 bool secure = arm_is_secure(env); 3696 3697 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 3698 return env->cp15.vmpidr_el2; 3699 } 3700 return mpidr_read_val(env); 3701 } 3702 3703 static const ARMCPRegInfo lpae_cp_reginfo[] = { 3704 /* NOP AMAIR0/1 */ 3705 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 3706 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 3707 .access = PL1_RW, .type = ARM_CP_CONST, 3708 .resetvalue = 0 }, 3709 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 3710 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 3711 .access = PL1_RW, .type = ARM_CP_CONST, 3712 .resetvalue = 0 }, 3713 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 3714 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 3715 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 3716 offsetof(CPUARMState, cp15.par_ns)} }, 3717 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 3718 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3719 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 3720 offsetof(CPUARMState, cp15.ttbr0_ns) }, 3721 .writefn = vmsa_ttbr_write, }, 3722 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 3723 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3724 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 3725 offsetof(CPUARMState, cp15.ttbr1_ns) }, 3726 .writefn = vmsa_ttbr_write, }, 3727 REGINFO_SENTINEL 3728 }; 3729 3730 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3731 { 3732 return vfp_get_fpcr(env); 3733 } 3734 3735 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3736 uint64_t value) 3737 { 3738 vfp_set_fpcr(env, value); 3739 } 3740 3741 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3742 { 3743 return vfp_get_fpsr(env); 3744 } 3745 3746 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3747 uint64_t value) 3748 { 3749 vfp_set_fpsr(env, value); 3750 } 3751 3752 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 3753 bool isread) 3754 { 3755 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) { 3756 return CP_ACCESS_TRAP; 3757 } 3758 return CP_ACCESS_OK; 3759 } 3760 3761 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 3762 uint64_t value) 3763 { 3764 env->daif = value & PSTATE_DAIF; 3765 } 3766 3767 static CPAccessResult aa64_cacheop_access(CPUARMState *env, 3768 const ARMCPRegInfo *ri, 3769 bool isread) 3770 { 3771 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless 3772 * SCTLR_EL1.UCI is set. 
3773 */ 3774 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) { 3775 return CP_ACCESS_TRAP; 3776 } 3777 return CP_ACCESS_OK; 3778 } 3779 3780 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 3781 * Page D4-1736 (DDI0487A.b) 3782 */ 3783 3784 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3785 uint64_t value) 3786 { 3787 CPUState *cs = env_cpu(env); 3788 bool sec = arm_is_secure_below_el3(env); 3789 3790 if (sec) { 3791 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3792 ARMMMUIdxBit_S1SE1 | 3793 ARMMMUIdxBit_S1SE0); 3794 } else { 3795 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3796 ARMMMUIdxBit_S12NSE1 | 3797 ARMMMUIdxBit_S12NSE0); 3798 } 3799 } 3800 3801 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3802 uint64_t value) 3803 { 3804 CPUState *cs = env_cpu(env); 3805 3806 if (tlb_force_broadcast(env)) { 3807 tlbi_aa64_vmalle1is_write(env, NULL, value); 3808 return; 3809 } 3810 3811 if (arm_is_secure_below_el3(env)) { 3812 tlb_flush_by_mmuidx(cs, 3813 ARMMMUIdxBit_S1SE1 | 3814 ARMMMUIdxBit_S1SE0); 3815 } else { 3816 tlb_flush_by_mmuidx(cs, 3817 ARMMMUIdxBit_S12NSE1 | 3818 ARMMMUIdxBit_S12NSE0); 3819 } 3820 } 3821 3822 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3823 uint64_t value) 3824 { 3825 /* Note that the 'ALL' scope must invalidate both stage 1 and 3826 * stage 2 translations, whereas most other scopes only invalidate 3827 * stage 1 translations. 3828 */ 3829 ARMCPU *cpu = env_archcpu(env); 3830 CPUState *cs = CPU(cpu); 3831 3832 if (arm_is_secure_below_el3(env)) { 3833 tlb_flush_by_mmuidx(cs, 3834 ARMMMUIdxBit_S1SE1 | 3835 ARMMMUIdxBit_S1SE0); 3836 } else { 3837 if (arm_feature(env, ARM_FEATURE_EL2)) { 3838 tlb_flush_by_mmuidx(cs, 3839 ARMMMUIdxBit_S12NSE1 | 3840 ARMMMUIdxBit_S12NSE0 | 3841 ARMMMUIdxBit_S2NS); 3842 } else { 3843 tlb_flush_by_mmuidx(cs, 3844 ARMMMUIdxBit_S12NSE1 | 3845 ARMMMUIdxBit_S12NSE0); 3846 } 3847 } 3848 } 3849 3850 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3851 uint64_t value) 3852 { 3853 ARMCPU *cpu = env_archcpu(env); 3854 CPUState *cs = CPU(cpu); 3855 3856 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2); 3857 } 3858 3859 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3860 uint64_t value) 3861 { 3862 ARMCPU *cpu = env_archcpu(env); 3863 CPUState *cs = CPU(cpu); 3864 3865 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3); 3866 } 3867 3868 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3869 uint64_t value) 3870 { 3871 /* Note that the 'ALL' scope must invalidate both stage 1 and 3872 * stage 2 translations, whereas most other scopes only invalidate 3873 * stage 1 translations. 
3874 */ 3875 CPUState *cs = env_cpu(env); 3876 bool sec = arm_is_secure_below_el3(env); 3877 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2); 3878 3879 if (sec) { 3880 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3881 ARMMMUIdxBit_S1SE1 | 3882 ARMMMUIdxBit_S1SE0); 3883 } else if (has_el2) { 3884 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3885 ARMMMUIdxBit_S12NSE1 | 3886 ARMMMUIdxBit_S12NSE0 | 3887 ARMMMUIdxBit_S2NS); 3888 } else { 3889 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3890 ARMMMUIdxBit_S12NSE1 | 3891 ARMMMUIdxBit_S12NSE0); 3892 } 3893 } 3894 3895 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3896 uint64_t value) 3897 { 3898 CPUState *cs = env_cpu(env); 3899 3900 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2); 3901 } 3902 3903 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3904 uint64_t value) 3905 { 3906 CPUState *cs = env_cpu(env); 3907 3908 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3); 3909 } 3910 3911 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3912 uint64_t value) 3913 { 3914 /* Invalidate by VA, EL2 3915 * Currently handles both VAE2 and VALE2, since we don't support 3916 * flush-last-level-only. 3917 */ 3918 ARMCPU *cpu = env_archcpu(env); 3919 CPUState *cs = CPU(cpu); 3920 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3921 3922 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2); 3923 } 3924 3925 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3926 uint64_t value) 3927 { 3928 /* Invalidate by VA, EL3 3929 * Currently handles both VAE3 and VALE3, since we don't support 3930 * flush-last-level-only. 3931 */ 3932 ARMCPU *cpu = env_archcpu(env); 3933 CPUState *cs = CPU(cpu); 3934 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3935 3936 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3); 3937 } 3938 3939 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3940 uint64_t value) 3941 { 3942 ARMCPU *cpu = env_archcpu(env); 3943 CPUState *cs = CPU(cpu); 3944 bool sec = arm_is_secure_below_el3(env); 3945 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3946 3947 if (sec) { 3948 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3949 ARMMMUIdxBit_S1SE1 | 3950 ARMMMUIdxBit_S1SE0); 3951 } else { 3952 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3953 ARMMMUIdxBit_S12NSE1 | 3954 ARMMMUIdxBit_S12NSE0); 3955 } 3956 } 3957 3958 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3959 uint64_t value) 3960 { 3961 /* Invalidate by VA, EL1&0 (AArch64 version). 3962 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 3963 * since we don't support flush-for-specific-ASID-only or 3964 * flush-last-level-only. 
3965 */ 3966 ARMCPU *cpu = env_archcpu(env); 3967 CPUState *cs = CPU(cpu); 3968 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3969 3970 if (tlb_force_broadcast(env)) { 3971 tlbi_aa64_vae1is_write(env, NULL, value); 3972 return; 3973 } 3974 3975 if (arm_is_secure_below_el3(env)) { 3976 tlb_flush_page_by_mmuidx(cs, pageaddr, 3977 ARMMMUIdxBit_S1SE1 | 3978 ARMMMUIdxBit_S1SE0); 3979 } else { 3980 tlb_flush_page_by_mmuidx(cs, pageaddr, 3981 ARMMMUIdxBit_S12NSE1 | 3982 ARMMMUIdxBit_S12NSE0); 3983 } 3984 } 3985 3986 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3987 uint64_t value) 3988 { 3989 CPUState *cs = env_cpu(env); 3990 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3991 3992 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3993 ARMMMUIdxBit_S1E2); 3994 } 3995 3996 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3997 uint64_t value) 3998 { 3999 CPUState *cs = env_cpu(env); 4000 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4001 4002 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4003 ARMMMUIdxBit_S1E3); 4004 } 4005 4006 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4007 uint64_t value) 4008 { 4009 /* Invalidate by IPA. This has to invalidate any structures that 4010 * contain only stage 2 translation information, but does not need 4011 * to apply to structures that contain combined stage 1 and stage 2 4012 * translation information. 4013 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 4014 */ 4015 ARMCPU *cpu = env_archcpu(env); 4016 CPUState *cs = CPU(cpu); 4017 uint64_t pageaddr; 4018 4019 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 4020 return; 4021 } 4022 4023 pageaddr = sextract64(value << 12, 0, 48); 4024 4025 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS); 4026 } 4027 4028 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4029 uint64_t value) 4030 { 4031 CPUState *cs = env_cpu(env); 4032 uint64_t pageaddr; 4033 4034 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 4035 return; 4036 } 4037 4038 pageaddr = sextract64(value << 12, 0, 48); 4039 4040 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4041 ARMMMUIdxBit_S2NS); 4042 } 4043 4044 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 4045 bool isread) 4046 { 4047 /* We don't implement EL2, so the only control on DC ZVA is the 4048 * bit in the SCTLR which can prohibit access for EL0. 4049 */ 4050 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 4051 return CP_ACCESS_TRAP; 4052 } 4053 return CP_ACCESS_OK; 4054 } 4055 4056 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 4057 { 4058 ARMCPU *cpu = env_archcpu(env); 4059 int dzp_bit = 1 << 4; 4060 4061 /* DZP indicates whether DC ZVA access is allowed */ 4062 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 4063 dzp_bit = 0; 4064 } 4065 return cpu->dcz_blocksize | dzp_bit; 4066 } 4067 4068 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 4069 bool isread) 4070 { 4071 if (!(env->pstate & PSTATE_SP)) { 4072 /* Access to SP_EL0 is undefined if it's being used as 4073 * the stack pointer. 
4074 */ 4075 return CP_ACCESS_TRAP_UNCATEGORIZED; 4076 } 4077 return CP_ACCESS_OK; 4078 } 4079 4080 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 4081 { 4082 return env->pstate & PSTATE_SP; 4083 } 4084 4085 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 4086 { 4087 update_spsel(env, val); 4088 } 4089 4090 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4091 uint64_t value) 4092 { 4093 ARMCPU *cpu = env_archcpu(env); 4094 4095 if (raw_read(env, ri) == value) { 4096 /* Skip the TLB flush if nothing actually changed; Linux likes 4097 * to do a lot of pointless SCTLR writes. 4098 */ 4099 return; 4100 } 4101 4102 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 4103 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 4104 value &= ~SCTLR_M; 4105 } 4106 4107 raw_write(env, ri, value); 4108 /* ??? Lots of these bits are not implemented. */ 4109 /* This may enable/disable the MMU, so do a TLB flush. */ 4110 tlb_flush(CPU(cpu)); 4111 } 4112 4113 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, 4114 bool isread) 4115 { 4116 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { 4117 return CP_ACCESS_TRAP_FP_EL2; 4118 } 4119 if (env->cp15.cptr_el[3] & CPTR_TFP) { 4120 return CP_ACCESS_TRAP_FP_EL3; 4121 } 4122 return CP_ACCESS_OK; 4123 } 4124 4125 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4126 uint64_t value) 4127 { 4128 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 4129 } 4130 4131 static const ARMCPRegInfo v8_cp_reginfo[] = { 4132 /* Minimal set of EL0-visible registers. This will need to be expanded 4133 * significantly for system emulation of AArch64 CPUs. 4134 */ 4135 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 4136 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 4137 .access = PL0_RW, .type = ARM_CP_NZCV }, 4138 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 4139 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 4140 .type = ARM_CP_NO_RAW, 4141 .access = PL0_RW, .accessfn = aa64_daif_access, 4142 .fieldoffset = offsetof(CPUARMState, daif), 4143 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 4144 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 4145 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 4146 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4147 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 4148 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 4149 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 4150 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4151 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 4152 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 4153 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 4154 .access = PL0_R, .type = ARM_CP_NO_RAW, 4155 .readfn = aa64_dczid_read }, 4156 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 4157 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 4158 .access = PL0_W, .type = ARM_CP_DC_ZVA, 4159 #ifndef CONFIG_USER_ONLY 4160 /* Avoid overhead of an access check that always passes in user-mode */ 4161 .accessfn = aa64_zva_access, 4162 #endif 4163 }, 4164 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 4165 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 4166 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 4167 /* Cache ops: all NOPs since we don't emulate caches */ 4168 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 4169 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4170 .access = PL1_W, .type = 
ARM_CP_NOP }, 4171 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 4172 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4173 .access = PL1_W, .type = ARM_CP_NOP }, 4174 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 4175 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 4176 .access = PL0_W, .type = ARM_CP_NOP, 4177 .accessfn = aa64_cacheop_access }, 4178 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 4179 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4180 .access = PL1_W, .type = ARM_CP_NOP }, 4181 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 4182 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4183 .access = PL1_W, .type = ARM_CP_NOP }, 4184 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 4185 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 4186 .access = PL0_W, .type = ARM_CP_NOP, 4187 .accessfn = aa64_cacheop_access }, 4188 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 4189 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4190 .access = PL1_W, .type = ARM_CP_NOP }, 4191 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 4192 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 4193 .access = PL0_W, .type = ARM_CP_NOP, 4194 .accessfn = aa64_cacheop_access }, 4195 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 4196 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 4197 .access = PL0_W, .type = ARM_CP_NOP, 4198 .accessfn = aa64_cacheop_access }, 4199 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 4200 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4201 .access = PL1_W, .type = ARM_CP_NOP }, 4202 /* TLBI operations */ 4203 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 4204 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 4205 .access = PL1_W, .type = ARM_CP_NO_RAW, 4206 .writefn = tlbi_aa64_vmalle1is_write }, 4207 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 4208 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 4209 .access = PL1_W, .type = ARM_CP_NO_RAW, 4210 .writefn = tlbi_aa64_vae1is_write }, 4211 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 4212 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 4213 .access = PL1_W, .type = ARM_CP_NO_RAW, 4214 .writefn = tlbi_aa64_vmalle1is_write }, 4215 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 4216 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 4217 .access = PL1_W, .type = ARM_CP_NO_RAW, 4218 .writefn = tlbi_aa64_vae1is_write }, 4219 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 4220 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4221 .access = PL1_W, .type = ARM_CP_NO_RAW, 4222 .writefn = tlbi_aa64_vae1is_write }, 4223 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 4224 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4225 .access = PL1_W, .type = ARM_CP_NO_RAW, 4226 .writefn = tlbi_aa64_vae1is_write }, 4227 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 4228 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 4229 .access = PL1_W, .type = ARM_CP_NO_RAW, 4230 .writefn = tlbi_aa64_vmalle1_write }, 4231 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 4232 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 4233 .access = PL1_W, .type = ARM_CP_NO_RAW, 4234 .writefn = tlbi_aa64_vae1_write }, 4235 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 4236 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 4237 .access = PL1_W, .type = ARM_CP_NO_RAW, 4238 .writefn = tlbi_aa64_vmalle1_write }, 4239 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 4240 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 
3, 4241 .access = PL1_W, .type = ARM_CP_NO_RAW, 4242 .writefn = tlbi_aa64_vae1_write }, 4243 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 4244 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4245 .access = PL1_W, .type = ARM_CP_NO_RAW, 4246 .writefn = tlbi_aa64_vae1_write }, 4247 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 4248 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 4249 .access = PL1_W, .type = ARM_CP_NO_RAW, 4250 .writefn = tlbi_aa64_vae1_write }, 4251 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 4252 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4253 .access = PL2_W, .type = ARM_CP_NO_RAW, 4254 .writefn = tlbi_aa64_ipas2e1is_write }, 4255 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 4256 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4257 .access = PL2_W, .type = ARM_CP_NO_RAW, 4258 .writefn = tlbi_aa64_ipas2e1is_write }, 4259 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 4260 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 4261 .access = PL2_W, .type = ARM_CP_NO_RAW, 4262 .writefn = tlbi_aa64_alle1is_write }, 4263 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 4264 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, 4265 .access = PL2_W, .type = ARM_CP_NO_RAW, 4266 .writefn = tlbi_aa64_alle1is_write }, 4267 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 4268 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4269 .access = PL2_W, .type = ARM_CP_NO_RAW, 4270 .writefn = tlbi_aa64_ipas2e1_write }, 4271 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 4272 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4273 .access = PL2_W, .type = ARM_CP_NO_RAW, 4274 .writefn = tlbi_aa64_ipas2e1_write }, 4275 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 4276 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 4277 .access = PL2_W, .type = ARM_CP_NO_RAW, 4278 .writefn = tlbi_aa64_alle1_write }, 4279 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 4280 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 4281 .access = PL2_W, .type = ARM_CP_NO_RAW, 4282 .writefn = tlbi_aa64_alle1is_write }, 4283 #ifndef CONFIG_USER_ONLY 4284 /* 64 bit address translation operations */ 4285 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 4286 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 4287 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4288 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 4289 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 4290 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4291 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 4292 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 4293 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4294 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 4295 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 4296 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4297 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 4298 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 4299 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4300 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 4301 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 4302 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4303 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 4304 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 4305 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4306 { .name = "AT_S12E0W", .state = 
ARM_CP_STATE_AA64, 4307 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 4308 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4309 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 4310 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 4311 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 4312 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4313 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 4314 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 4315 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4316 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 4317 .type = ARM_CP_ALIAS, 4318 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 4319 .access = PL1_RW, .resetvalue = 0, 4320 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 4321 .writefn = par_write }, 4322 #endif 4323 /* TLB invalidate last level of translation table walk */ 4324 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4325 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 4326 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4327 .type = ARM_CP_NO_RAW, .access = PL1_W, 4328 .writefn = tlbimvaa_is_write }, 4329 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4330 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 4331 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 4332 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 4333 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 4334 .type = ARM_CP_NO_RAW, .access = PL2_W, 4335 .writefn = tlbimva_hyp_write }, 4336 { .name = "TLBIMVALHIS", 4337 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 4338 .type = ARM_CP_NO_RAW, .access = PL2_W, 4339 .writefn = tlbimva_hyp_is_write }, 4340 { .name = "TLBIIPAS2", 4341 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4342 .type = ARM_CP_NO_RAW, .access = PL2_W, 4343 .writefn = tlbiipas2_write }, 4344 { .name = "TLBIIPAS2IS", 4345 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4346 .type = ARM_CP_NO_RAW, .access = PL2_W, 4347 .writefn = tlbiipas2_is_write }, 4348 { .name = "TLBIIPAS2L", 4349 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4350 .type = ARM_CP_NO_RAW, .access = PL2_W, 4351 .writefn = tlbiipas2_write }, 4352 { .name = "TLBIIPAS2LIS", 4353 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4354 .type = ARM_CP_NO_RAW, .access = PL2_W, 4355 .writefn = tlbiipas2_is_write }, 4356 /* 32 bit cache operations */ 4357 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4358 .type = ARM_CP_NOP, .access = PL1_W }, 4359 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 4360 .type = ARM_CP_NOP, .access = PL1_W }, 4361 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4362 .type = ARM_CP_NOP, .access = PL1_W }, 4363 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 4364 .type = ARM_CP_NOP, .access = PL1_W }, 4365 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 4366 .type = ARM_CP_NOP, .access = PL1_W }, 4367 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 4368 .type = ARM_CP_NOP, .access = PL1_W }, 4369 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4370 .type = ARM_CP_NOP, .access = PL1_W }, 4371 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4372 .type = 
ARM_CP_NOP, .access = PL1_W }, 4373 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 4374 .type = ARM_CP_NOP, .access = PL1_W }, 4375 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4376 .type = ARM_CP_NOP, .access = PL1_W }, 4377 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 4378 .type = ARM_CP_NOP, .access = PL1_W }, 4379 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 4380 .type = ARM_CP_NOP, .access = PL1_W }, 4381 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4382 .type = ARM_CP_NOP, .access = PL1_W }, 4383 /* MMU Domain access control / MPU write buffer control */ 4384 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 4385 .access = PL1_RW, .resetvalue = 0, 4386 .writefn = dacr_write, .raw_writefn = raw_write, 4387 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 4388 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 4389 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 4390 .type = ARM_CP_ALIAS, 4391 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 4392 .access = PL1_RW, 4393 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 4394 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 4395 .type = ARM_CP_ALIAS, 4396 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 4397 .access = PL1_RW, 4398 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 4399 /* We rely on the access checks not allowing the guest to write to the 4400 * state field when SPSel indicates that it's being used as the stack 4401 * pointer. 4402 */ 4403 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 4404 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 4405 .access = PL1_RW, .accessfn = sp_el0_access, 4406 .type = ARM_CP_ALIAS, 4407 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 4408 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 4409 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 4410 .access = PL2_RW, .type = ARM_CP_ALIAS, 4411 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 4412 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 4413 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 4414 .type = ARM_CP_NO_RAW, 4415 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 4416 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 4417 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 4418 .type = ARM_CP_ALIAS, 4419 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), 4420 .access = PL2_RW, .accessfn = fpexc32_access }, 4421 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 4422 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 4423 .access = PL2_RW, .resetvalue = 0, 4424 .writefn = dacr_write, .raw_writefn = raw_write, 4425 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 4426 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 4427 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 4428 .access = PL2_RW, .resetvalue = 0, 4429 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 4430 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 4431 .type = ARM_CP_ALIAS, 4432 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 4433 .access = PL2_RW, 4434 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 4435 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 4436 .type = ARM_CP_ALIAS, 4437 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 4438 .access = PL2_RW, 4439 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 4440 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 
4441 .type = ARM_CP_ALIAS, 4442 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, 4443 .access = PL2_RW, 4444 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 4445 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 4446 .type = ARM_CP_ALIAS, 4447 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 4448 .access = PL2_RW, 4449 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 4450 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 4451 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 4452 .resetvalue = 0, 4453 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 4454 { .name = "SDCR", .type = ARM_CP_ALIAS, 4455 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 4456 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4457 .writefn = sdcr_write, 4458 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 4459 REGINFO_SENTINEL 4460 }; 4461 4462 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */ 4463 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { 4464 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 4465 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 4466 .access = PL2_RW, 4467 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 4468 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH, 4470 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 4471 .access = PL2_RW, 4472 .type = ARM_CP_CONST, .resetvalue = 0 }, 4473 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 4474 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 4475 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4476 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 4477 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 4478 .access = PL2_RW, 4479 .type = ARM_CP_CONST, .resetvalue = 0 }, 4480 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 4481 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 4482 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4483 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 4484 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 4485 .access = PL2_RW, .type = ARM_CP_CONST, 4486 .resetvalue = 0 }, 4487 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 4488 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 4489 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4490 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 4491 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 4492 .access = PL2_RW, .type = ARM_CP_CONST, 4493 .resetvalue = 0 }, 4494 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 4495 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 4496 .access = PL2_RW, .type = ARM_CP_CONST, 4497 .resetvalue = 0 }, 4498 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 4499 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 4500 .access = PL2_RW, .type = ARM_CP_CONST, 4501 .resetvalue = 0 }, 4502 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 4503 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 4504 .access = PL2_RW, .type = ARM_CP_CONST, 4505 .resetvalue = 0 }, 4506 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 4507 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 4508 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4509 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, 4510 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 4511 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 4512 .type = ARM_CP_CONST, .resetvalue = 0 }, 4513 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 4514 .cp = 15, .opc1 =
6, .crm = 2, 4515 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4516 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, 4517 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 4518 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 4519 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4520 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 4521 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 4522 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4523 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 4524 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 4525 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4526 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 4527 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 4528 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4529 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 4530 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 4531 .resetvalue = 0 }, 4532 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 4533 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 4534 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4535 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 4536 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 4537 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4538 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 4539 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 4540 .resetvalue = 0 }, 4541 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 4542 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 4543 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4544 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 4545 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 4546 .resetvalue = 0 }, 4547 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 4548 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 4549 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4550 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 4551 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 4552 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4553 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 4554 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 4555 .access = PL2_RW, .accessfn = access_tda, 4556 .type = ARM_CP_CONST, .resetvalue = 0 }, 4557 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 4558 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 4559 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 4560 .type = ARM_CP_CONST, .resetvalue = 0 }, 4561 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 4562 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 4563 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4564 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 4565 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 4566 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4567 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 4568 .type = ARM_CP_CONST, 4569 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 4570 .access = PL2_RW, .resetvalue = 0 }, 4571 REGINFO_SENTINEL 4572 }; 4573 4574 /* Ditto, but for registers which exist in ARMv8 but not v7 */ 4575 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { 4576 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 4577 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 4578 .access = PL2_RW, 4579 .type = ARM_CP_CONST, .resetvalue = 0 }, 4580 REGINFO_SENTINEL 4581 }; 4582 4583 static void hcr_write(CPUARMState 
*env, const ARMCPRegInfo *ri, uint64_t value) 4584 { 4585 ARMCPU *cpu = env_archcpu(env); 4586 uint64_t valid_mask = HCR_MASK; 4587 4588 if (arm_feature(env, ARM_FEATURE_EL3)) { 4589 valid_mask &= ~HCR_HCD; 4590 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 4591 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 4592 * However, if we're using the SMC PSCI conduit then QEMU is 4593 * effectively acting like EL3 firmware and so the guest at 4594 * EL2 should retain the ability to prevent EL1 from being 4595 * able to make SMC calls into the ersatz firmware, so in 4596 * that case HCR.TSC should be read/write. 4597 */ 4598 valid_mask &= ~HCR_TSC; 4599 } 4600 if (cpu_isar_feature(aa64_lor, cpu)) { 4601 valid_mask |= HCR_TLOR; 4602 } 4603 if (cpu_isar_feature(aa64_pauth, cpu)) { 4604 valid_mask |= HCR_API | HCR_APK; 4605 } 4606 4607 /* Clear RES0 bits. */ 4608 value &= valid_mask; 4609 4610 /* These bits change the MMU setup: 4611 * HCR_VM enables stage 2 translation 4612 * HCR_PTW forbids certain page-table setups 4613 * HCR_DC disables stage 1 and enables stage 2 translation 4614 */ 4615 if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { 4616 tlb_flush(CPU(cpu)); 4617 } 4618 env->cp15.hcr_el2 = value; 4619 4620 /* 4621 * Updates to VI and VF require us to update the status of 4622 * virtual interrupts, which are the logical OR of these bits 4623 * and the state of the input lines from the GIC. (This requires 4624 * that we have the iothread lock, which is done by marking the 4625 * reginfo structs as ARM_CP_IO.) 4626 * Note that if a write to HCR pends a VIRQ or VFIQ it is never 4627 * possible for it to be taken immediately, because VIRQ and 4628 * VFIQ are masked unless running at EL0 or EL1, and HCR 4629 * can only be written at EL2. 4630 */ 4631 g_assert(qemu_mutex_iothread_locked()); 4632 arm_cpu_update_virq(cpu); 4633 arm_cpu_update_vfiq(cpu); 4634 } 4635 4636 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, 4637 uint64_t value) 4638 { 4639 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */ 4640 value = deposit64(env->cp15.hcr_el2, 32, 32, value); 4641 hcr_write(env, NULL, value); 4642 } 4643 4644 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, 4645 uint64_t value) 4646 { 4647 /* Handle HCR write, i.e. write to low half of HCR_EL2 */ 4648 value = deposit64(env->cp15.hcr_el2, 0, 32, value); 4649 hcr_write(env, NULL, value); 4650 } 4651 4652 /* 4653 * Return the effective value of HCR_EL2. 4654 * Bits that are not included here: 4655 * RW (read from SCR_EL3.RW as needed) 4656 */ 4657 uint64_t arm_hcr_el2_eff(CPUARMState *env) 4658 { 4659 uint64_t ret = env->cp15.hcr_el2; 4660 4661 if (arm_is_secure_below_el3(env)) { 4662 /* 4663 * "This register has no effect if EL2 is not enabled in the 4664 * current Security state". This is ARMv8.4-SecEL2 speak for 4665 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). 4666 * 4667 * Prior to that, the language was "In an implementation that 4668 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves 4669 * as if this field is 0 for all purposes other than a direct 4670 * read or write access of HCR_EL2". With lots of enumeration 4671 * on a per-field basis. In current QEMU, this condition 4672 * is arm_is_secure_below_el3. 4673 * 4674 * Since the v8.4 language applies to the entire register, and 4675 * appears to be backward compatible, use that. 4676 */ 4677 ret = 0; 4678 } else if (ret & HCR_TGE) { 4679 /* These bits are up-to-date as of ARMv8.4.
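 * For instance, with TGE set and E2H clear, the masking below makes FMO/IMO/AMO behave as 1 (physical interrupts target EL2) while trap bits such as TSC and TVM behave as 0.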
*/ 4680 if (ret & HCR_E2H) { 4681 ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO | 4682 HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE | 4683 HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU | 4684 HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE); 4685 } else { 4686 ret |= HCR_FMO | HCR_IMO | HCR_AMO; 4687 } 4688 ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE | 4689 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR | 4690 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM | 4691 HCR_TLOR); 4692 } 4693 4694 return ret; 4695 } 4696 4697 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4698 uint64_t value) 4699 { 4700 /* 4701 * For A-profile AArch32 EL3, if NSACR.CP10 4702 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 4703 */ 4704 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 4705 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 4706 value &= ~(0x3 << 10); 4707 value |= env->cp15.cptr_el[2] & (0x3 << 10); 4708 } 4709 env->cp15.cptr_el[2] = value; 4710 } 4711 4712 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri) 4713 { 4714 /* 4715 * For A-profile AArch32 EL3, if NSACR.CP10 4716 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 4717 */ 4718 uint64_t value = env->cp15.cptr_el[2]; 4719 4720 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 4721 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 4722 value |= 0x3 << 10; 4723 } 4724 return value; 4725 } 4726 4727 static const ARMCPRegInfo el2_cp_reginfo[] = { 4728 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 4729 .type = ARM_CP_IO, 4730 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 4731 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 4732 .writefn = hcr_write }, 4733 { .name = "HCR", .state = ARM_CP_STATE_AA32, 4734 .type = ARM_CP_ALIAS | ARM_CP_IO, 4735 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 4736 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 4737 .writefn = hcr_writelow }, 4738 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 4739 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 4740 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4741 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 4742 .type = ARM_CP_ALIAS, 4743 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 4744 .access = PL2_RW, 4745 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 4746 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 4747 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 4748 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 4749 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 4750 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 4751 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 4752 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 4753 .type = ARM_CP_ALIAS, 4754 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 4755 .access = PL2_RW, 4756 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, 4757 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 4758 .type = ARM_CP_ALIAS, 4759 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 4760 .access = PL2_RW, 4761 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 4762 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 4763 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 4764 .access = PL2_RW, .writefn = vbar_write, 4765 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 4766 .resetvalue = 0 }, 4767 { .name = "SP_EL2", .state = 
ARM_CP_STATE_AA64, 4768 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 4769 .access = PL3_RW, .type = ARM_CP_ALIAS, 4770 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 4771 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 4772 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 4773 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 4774 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]), 4775 .readfn = cptr_el2_read, .writefn = cptr_el2_write }, 4776 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 4777 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 4778 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 4779 .resetvalue = 0 }, 4780 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 4781 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 4782 .access = PL2_RW, .type = ARM_CP_ALIAS, 4783 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 4784 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 4785 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 4786 .access = PL2_RW, .type = ARM_CP_CONST, 4787 .resetvalue = 0 }, 4788 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 4789 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 4790 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 4791 .access = PL2_RW, .type = ARM_CP_CONST, 4792 .resetvalue = 0 }, 4793 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 4794 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 4795 .access = PL2_RW, .type = ARM_CP_CONST, 4796 .resetvalue = 0 }, 4797 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 4798 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 4799 .access = PL2_RW, .type = ARM_CP_CONST, 4800 .resetvalue = 0 }, 4801 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 4802 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 4803 .access = PL2_RW, 4804 /* no .writefn needed as this can't cause an ASID change; 4805 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 4806 */ 4807 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 4808 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 4809 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 4810 .type = ARM_CP_ALIAS, 4811 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4812 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 4813 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 4814 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 4815 .access = PL2_RW, 4816 /* no .writefn needed as this can't cause an ASID change; 4817 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 4818 */ 4819 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 4820 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 4821 .cp = 15, .opc1 = 6, .crm = 2, 4822 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4823 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4824 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 4825 .writefn = vttbr_write }, 4826 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 4827 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 4828 .access = PL2_RW, .writefn = vttbr_write, 4829 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 4830 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 4831 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 4832 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 4833 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 4834 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 4835 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 4836 .access = PL2_RW, .resetvalue = 0, 4837 .fieldoffset = 
offsetof(CPUARMState, cp15.tpidr_el[2]) }, 4838 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 4839 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 4840 .access = PL2_RW, .resetvalue = 0, 4841 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 4842 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 4843 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4844 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 4845 { .name = "TLBIALLNSNH", 4846 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 4847 .type = ARM_CP_NO_RAW, .access = PL2_W, 4848 .writefn = tlbiall_nsnh_write }, 4849 { .name = "TLBIALLNSNHIS", 4850 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 4851 .type = ARM_CP_NO_RAW, .access = PL2_W, 4852 .writefn = tlbiall_nsnh_is_write }, 4853 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 4854 .type = ARM_CP_NO_RAW, .access = PL2_W, 4855 .writefn = tlbiall_hyp_write }, 4856 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 4857 .type = ARM_CP_NO_RAW, .access = PL2_W, 4858 .writefn = tlbiall_hyp_is_write }, 4859 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 4860 .type = ARM_CP_NO_RAW, .access = PL2_W, 4861 .writefn = tlbimva_hyp_write }, 4862 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 4863 .type = ARM_CP_NO_RAW, .access = PL2_W, 4864 .writefn = tlbimva_hyp_is_write }, 4865 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 4866 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 4867 .type = ARM_CP_NO_RAW, .access = PL2_W, 4868 .writefn = tlbi_aa64_alle2_write }, 4869 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 4870 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 4871 .type = ARM_CP_NO_RAW, .access = PL2_W, 4872 .writefn = tlbi_aa64_vae2_write }, 4873 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, 4874 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 4875 .access = PL2_W, .type = ARM_CP_NO_RAW, 4876 .writefn = tlbi_aa64_vae2_write }, 4877 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 4878 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 4879 .access = PL2_W, .type = ARM_CP_NO_RAW, 4880 .writefn = tlbi_aa64_alle2is_write }, 4881 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 4882 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 4883 .type = ARM_CP_NO_RAW, .access = PL2_W, 4884 .writefn = tlbi_aa64_vae2is_write }, 4885 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 4886 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 4887 .access = PL2_W, .type = ARM_CP_NO_RAW, 4888 .writefn = tlbi_aa64_vae2is_write }, 4889 #ifndef CONFIG_USER_ONLY 4890 /* Unlike the other EL2-related AT operations, these must 4891 * UNDEF from EL3 if EL2 is not implemented, which is why we 4892 * define them here rather than with the rest of the AT ops. 4893 */ 4894 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 4895 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 4896 .access = PL2_W, .accessfn = at_s1e2_access, 4897 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4898 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 4899 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 4900 .access = PL2_W, .accessfn = at_s1e2_access, 4901 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4902 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 4903 * if EL2 is not implemented; we choose to UNDEF. 
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
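    /* Reader's note on the reset value above (descriptive only, no new
     * behaviour): with HCR_EL2.E2H == 0, CNTHCTL_EL2 bit 0 is EL1PCTEN and
     * bit 1 is EL1PCEN (PL1PCTEN/PL1PCEN in ARMv7 terms), so resetting to 3
     * starts the guest with EL1/EL0 accesses to the physical counter and
     * timer not trapped to EL2.
     */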
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
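/* Illustrative note on the lock protocol implemented above (no additional
 * behaviour): an AArch32 guest locks the OS lock by writing the key value
 * 0xC5ACCE55 and unlocks it by writing anything else, while an AArch64
 * guest writes OSLAR_EL1.OSLK (bit 0) directly. Either way the result is
 * reported in OSLSR_EL1.OSLK (bit 1), which is what the deposit32() call
 * updates.
 */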
5186 */ 5187 { .name = "DBGVCR", 5188 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 5189 .access = PL1_RW, .accessfn = access_tda, 5190 .type = ARM_CP_NOP }, 5191 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 5192 * to save and restore a 32-bit guest's DBGVCR) 5193 */ 5194 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 5195 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 5196 .access = PL2_RW, .accessfn = access_tda, 5197 .type = ARM_CP_NOP }, 5198 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 5199 * Channel but Linux may try to access this register. The 32-bit 5200 * alias is DBGDCCINT. 5201 */ 5202 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 5203 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 5204 .access = PL1_RW, .accessfn = access_tda, 5205 .type = ARM_CP_NOP }, 5206 REGINFO_SENTINEL 5207 }; 5208 5209 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 5210 /* 64 bit access versions of the (dummy) debug registers */ 5211 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 5212 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 5213 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 5214 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 5215 REGINFO_SENTINEL 5216 }; 5217 5218 /* Return the exception level to which exceptions should be taken 5219 * via SVEAccessTrap. If an exception should be routed through 5220 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should 5221 * take care of raising that exception. 5222 * C.f. the ARM pseudocode function CheckSVEEnabled. 5223 */ 5224 int sve_exception_el(CPUARMState *env, int el) 5225 { 5226 #ifndef CONFIG_USER_ONLY 5227 if (el <= 1) { 5228 bool disabled = false; 5229 5230 /* The CPACR.ZEN controls traps to EL1: 5231 * 0, 2 : trap EL0 and EL1 accesses 5232 * 1 : trap only EL0 accesses 5233 * 3 : trap no accesses 5234 */ 5235 if (!extract32(env->cp15.cpacr_el1, 16, 1)) { 5236 disabled = true; 5237 } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) { 5238 disabled = el == 0; 5239 } 5240 if (disabled) { 5241 /* route_to_el2 */ 5242 return (arm_feature(env, ARM_FEATURE_EL2) 5243 && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1); 5244 } 5245 5246 /* Check CPACR.FPEN. */ 5247 if (!extract32(env->cp15.cpacr_el1, 20, 1)) { 5248 disabled = true; 5249 } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) { 5250 disabled = el == 0; 5251 } 5252 if (disabled) { 5253 return 0; 5254 } 5255 } 5256 5257 /* CPTR_EL2. Since TZ and TFP are positive, 5258 * they will be zero when EL2 is not present. 5259 */ 5260 if (el <= 2 && !arm_is_secure_below_el3(env)) { 5261 if (env->cp15.cptr_el[2] & CPTR_TZ) { 5262 return 2; 5263 } 5264 if (env->cp15.cptr_el[2] & CPTR_TFP) { 5265 return 0; 5266 } 5267 } 5268 5269 /* CPTR_EL3. Since EZ is negative we must check for EL3. */ 5270 if (arm_feature(env, ARM_FEATURE_EL3) 5271 && !(env->cp15.cptr_el[3] & CPTR_EZ)) { 5272 return 3; 5273 } 5274 #endif 5275 return 0; 5276 } 5277 5278 /* 5279 * Given that SVE is enabled, return the vector length for EL. 
5280 */ 5281 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) 5282 { 5283 ARMCPU *cpu = env_archcpu(env); 5284 uint32_t zcr_len = cpu->sve_max_vq - 1; 5285 5286 if (el <= 1) { 5287 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]); 5288 } 5289 if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) { 5290 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); 5291 } 5292 if (arm_feature(env, ARM_FEATURE_EL3)) { 5293 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); 5294 } 5295 return zcr_len; 5296 } 5297 5298 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5299 uint64_t value) 5300 { 5301 int cur_el = arm_current_el(env); 5302 int old_len = sve_zcr_len_for_el(env, cur_el); 5303 int new_len; 5304 5305 /* Bits other than [3:0] are RAZ/WI. */ 5306 raw_write(env, ri, value & 0xf); 5307 5308 /* 5309 * Because we arrived here, we know both FP and SVE are enabled; 5310 * otherwise we would have trapped access to the ZCR_ELn register. 5311 */ 5312 new_len = sve_zcr_len_for_el(env, cur_el); 5313 if (new_len < old_len) { 5314 aarch64_sve_narrow_vq(env, new_len + 1); 5315 } 5316 } 5317 5318 static const ARMCPRegInfo zcr_el1_reginfo = { 5319 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 5320 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 5321 .access = PL1_RW, .type = ARM_CP_SVE, 5322 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 5323 .writefn = zcr_write, .raw_writefn = raw_write 5324 }; 5325 5326 static const ARMCPRegInfo zcr_el2_reginfo = { 5327 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 5328 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 5329 .access = PL2_RW, .type = ARM_CP_SVE, 5330 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 5331 .writefn = zcr_write, .raw_writefn = raw_write 5332 }; 5333 5334 static const ARMCPRegInfo zcr_no_el2_reginfo = { 5335 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 5336 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 5337 .access = PL2_RW, .type = ARM_CP_SVE, 5338 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore 5339 }; 5340 5341 static const ARMCPRegInfo zcr_el3_reginfo = { 5342 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 5343 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 5344 .access = PL3_RW, .type = ARM_CP_SVE, 5345 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 5346 .writefn = zcr_write, .raw_writefn = raw_write 5347 }; 5348 5349 void hw_watchpoint_update(ARMCPU *cpu, int n) 5350 { 5351 CPUARMState *env = &cpu->env; 5352 vaddr len = 0; 5353 vaddr wvr = env->cp15.dbgwvr[n]; 5354 uint64_t wcr = env->cp15.dbgwcr[n]; 5355 int mask; 5356 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 5357 5358 if (env->cpu_watchpoint[n]) { 5359 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 5360 env->cpu_watchpoint[n] = NULL; 5361 } 5362 5363 if (!extract64(wcr, 0, 1)) { 5364 /* E bit clear : watchpoint disabled */ 5365 return; 5366 } 5367 5368 switch (extract64(wcr, 3, 2)) { 5369 case 0: 5370 /* LSC 00 is reserved and must behave as if the wp is disabled */ 5371 return; 5372 case 1: 5373 flags |= BP_MEM_READ; 5374 break; 5375 case 2: 5376 flags |= BP_MEM_WRITE; 5377 break; 5378 case 3: 5379 flags |= BP_MEM_ACCESS; 5380 break; 5381 } 5382 5383 /* Attempts to use both MASK and BAS fields simultaneously are 5384 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 5385 * thus generating a watchpoint for every byte in the masked region. 
5386 */ 5387 mask = extract64(wcr, 24, 4); 5388 if (mask == 1 || mask == 2) { 5389 /* Reserved values of MASK; we must act as if the mask value was 5390 * some non-reserved value, or as if the watchpoint were disabled. 5391 * We choose the latter. 5392 */ 5393 return; 5394 } else if (mask) { 5395 /* Watchpoint covers an aligned area up to 2GB in size */ 5396 len = 1ULL << mask; 5397 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 5398 * whether the watchpoint fires when the unmasked bits match; we opt 5399 * to generate the exceptions. 5400 */ 5401 wvr &= ~(len - 1); 5402 } else { 5403 /* Watchpoint covers bytes defined by the byte address select bits */ 5404 int bas = extract64(wcr, 5, 8); 5405 int basstart; 5406 5407 if (bas == 0) { 5408 /* This must act as if the watchpoint is disabled */ 5409 return; 5410 } 5411 5412 if (extract64(wvr, 2, 1)) { 5413 /* Deprecated case of an only 4-aligned address. BAS[7:4] are 5414 * ignored, and BAS[3:0] define which bytes to watch. 5415 */ 5416 bas &= 0xf; 5417 } 5418 /* The BAS bits are supposed to be programmed to indicate a contiguous 5419 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether 5420 * we fire for each byte in the word/doubleword addressed by the WVR. 5421 * We choose to ignore any non-zero bits after the first range of 1s. 5422 */ 5423 basstart = ctz32(bas); 5424 len = cto32(bas >> basstart); 5425 wvr += basstart; 5426 } 5427 5428 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, 5429 &env->cpu_watchpoint[n]); 5430 } 5431 5432 void hw_watchpoint_update_all(ARMCPU *cpu) 5433 { 5434 int i; 5435 CPUARMState *env = &cpu->env; 5436 5437 /* Completely clear out existing QEMU watchpoints and our array, to 5438 * avoid possible stale entries following migration load. 5439 */ 5440 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); 5441 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); 5442 5443 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { 5444 hw_watchpoint_update(cpu, i); 5445 } 5446 } 5447 5448 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5449 uint64_t value) 5450 { 5451 ARMCPU *cpu = env_archcpu(env); 5452 int i = ri->crm; 5453 5454 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the 5455 * register reads and behaves as if values written are sign extended. 5456 * Bits [1:0] are RES0. 
5457 */ 5458 value = sextract64(value, 0, 49) & ~3ULL; 5459 5460 raw_write(env, ri, value); 5461 hw_watchpoint_update(cpu, i); 5462 } 5463 5464 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5465 uint64_t value) 5466 { 5467 ARMCPU *cpu = env_archcpu(env); 5468 int i = ri->crm; 5469 5470 raw_write(env, ri, value); 5471 hw_watchpoint_update(cpu, i); 5472 } 5473 5474 void hw_breakpoint_update(ARMCPU *cpu, int n) 5475 { 5476 CPUARMState *env = &cpu->env; 5477 uint64_t bvr = env->cp15.dbgbvr[n]; 5478 uint64_t bcr = env->cp15.dbgbcr[n]; 5479 vaddr addr; 5480 int bt; 5481 int flags = BP_CPU; 5482 5483 if (env->cpu_breakpoint[n]) { 5484 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); 5485 env->cpu_breakpoint[n] = NULL; 5486 } 5487 5488 if (!extract64(bcr, 0, 1)) { 5489 /* E bit clear : watchpoint disabled */ 5490 return; 5491 } 5492 5493 bt = extract64(bcr, 20, 4); 5494 5495 switch (bt) { 5496 case 4: /* unlinked address mismatch (reserved if AArch64) */ 5497 case 5: /* linked address mismatch (reserved if AArch64) */ 5498 qemu_log_mask(LOG_UNIMP, 5499 "arm: address mismatch breakpoint types not implemented\n"); 5500 return; 5501 case 0: /* unlinked address match */ 5502 case 1: /* linked address match */ 5503 { 5504 /* Bits [63:49] are hardwired to the value of bit [48]; that is, 5505 * we behave as if the register was sign extended. Bits [1:0] are 5506 * RES0. The BAS field is used to allow setting breakpoints on 16 5507 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether 5508 * a bp will fire if the addresses covered by the bp and the addresses 5509 * covered by the insn overlap but the insn doesn't start at the 5510 * start of the bp address range. We choose to require the insn and 5511 * the bp to have the same address. The constraints on writing to 5512 * BAS enforced in dbgbcr_write mean we have only four cases: 5513 * 0b0000 => no breakpoint 5514 * 0b0011 => breakpoint on addr 5515 * 0b1100 => breakpoint on addr + 2 5516 * 0b1111 => breakpoint on addr 5517 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 5518 */ 5519 int bas = extract64(bcr, 5, 4); 5520 addr = sextract64(bvr, 0, 49) & ~3ULL; 5521 if (bas == 0) { 5522 return; 5523 } 5524 if (bas == 0xc) { 5525 addr += 2; 5526 } 5527 break; 5528 } 5529 case 2: /* unlinked context ID match */ 5530 case 8: /* unlinked VMID match (reserved if no EL2) */ 5531 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 5532 qemu_log_mask(LOG_UNIMP, 5533 "arm: unlinked context breakpoint types not implemented\n"); 5534 return; 5535 case 9: /* linked VMID match (reserved if no EL2) */ 5536 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 5537 case 3: /* linked context ID match */ 5538 default: 5539 /* We must generate no events for Linked context matches (unless 5540 * they are linked to by some other bp/wp, which is handled in 5541 * updates for the linking bp/wp). We choose to also generate no events 5542 * for reserved values. 5543 */ 5544 return; 5545 } 5546 5547 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 5548 } 5549 5550 void hw_breakpoint_update_all(ARMCPU *cpu) 5551 { 5552 int i; 5553 CPUARMState *env = &cpu->env; 5554 5555 /* Completely clear out existing QEMU breakpoints and our array, to 5556 * avoid possible stale entries following migration load. 
5557 */ 5558 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 5559 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 5560 5561 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 5562 hw_breakpoint_update(cpu, i); 5563 } 5564 } 5565 5566 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5567 uint64_t value) 5568 { 5569 ARMCPU *cpu = env_archcpu(env); 5570 int i = ri->crm; 5571 5572 raw_write(env, ri, value); 5573 hw_breakpoint_update(cpu, i); 5574 } 5575 5576 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5577 uint64_t value) 5578 { 5579 ARMCPU *cpu = env_archcpu(env); 5580 int i = ri->crm; 5581 5582 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 5583 * copy of BAS[0]. 5584 */ 5585 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 5586 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 5587 5588 raw_write(env, ri, value); 5589 hw_breakpoint_update(cpu, i); 5590 } 5591 5592 static void define_debug_regs(ARMCPU *cpu) 5593 { 5594 /* Define v7 and v8 architectural debug registers. 5595 * These are just dummy implementations for now. 5596 */ 5597 int i; 5598 int wrps, brps, ctx_cmps; 5599 ARMCPRegInfo dbgdidr = { 5600 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 5601 .access = PL0_R, .accessfn = access_tda, 5602 .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr, 5603 }; 5604 5605 /* Note that all these register fields hold "number of Xs minus 1". */ 5606 brps = extract32(cpu->dbgdidr, 24, 4); 5607 wrps = extract32(cpu->dbgdidr, 28, 4); 5608 ctx_cmps = extract32(cpu->dbgdidr, 20, 4); 5609 5610 assert(ctx_cmps <= brps); 5611 5612 /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties 5613 * of the debug registers such as number of breakpoints; 5614 * check that if they both exist then they agree. 
5615 */ 5616 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 5617 assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps); 5618 assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps); 5619 assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps); 5620 } 5621 5622 define_one_arm_cp_reg(cpu, &dbgdidr); 5623 define_arm_cp_regs(cpu, debug_cp_reginfo); 5624 5625 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 5626 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 5627 } 5628 5629 for (i = 0; i < brps + 1; i++) { 5630 ARMCPRegInfo dbgregs[] = { 5631 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 5632 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 5633 .access = PL1_RW, .accessfn = access_tda, 5634 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 5635 .writefn = dbgbvr_write, .raw_writefn = raw_write 5636 }, 5637 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 5638 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 5639 .access = PL1_RW, .accessfn = access_tda, 5640 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 5641 .writefn = dbgbcr_write, .raw_writefn = raw_write 5642 }, 5643 REGINFO_SENTINEL 5644 }; 5645 define_arm_cp_regs(cpu, dbgregs); 5646 } 5647 5648 for (i = 0; i < wrps + 1; i++) { 5649 ARMCPRegInfo dbgregs[] = { 5650 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 5651 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 5652 .access = PL1_RW, .accessfn = access_tda, 5653 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 5654 .writefn = dbgwvr_write, .raw_writefn = raw_write 5655 }, 5656 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 5657 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 5658 .access = PL1_RW, .accessfn = access_tda, 5659 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 5660 .writefn = dbgwcr_write, .raw_writefn = raw_write 5661 }, 5662 REGINFO_SENTINEL 5663 }; 5664 define_arm_cp_regs(cpu, dbgregs); 5665 } 5666 } 5667 5668 /* We don't know until after realize whether there's a GICv3 5669 * attached, and that is what registers the gicv3 sysregs. 5670 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 5671 * at runtime. 5672 */ 5673 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 5674 { 5675 ARMCPU *cpu = env_archcpu(env); 5676 uint64_t pfr1 = cpu->id_pfr1; 5677 5678 if (env->gicv3state) { 5679 pfr1 |= 1 << 28; 5680 } 5681 return pfr1; 5682 } 5683 5684 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 5685 { 5686 ARMCPU *cpu = env_archcpu(env); 5687 uint64_t pfr0 = cpu->isar.id_aa64pfr0; 5688 5689 if (env->gicv3state) { 5690 pfr0 |= 1 << 24; 5691 } 5692 return pfr0; 5693 } 5694 5695 /* Shared logic between LORID and the rest of the LOR* registers. 5696 * Secure state has already been delt with. 5697 */ 5698 static CPAccessResult access_lor_ns(CPUARMState *env) 5699 { 5700 int el = arm_current_el(env); 5701 5702 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) { 5703 return CP_ACCESS_TRAP_EL2; 5704 } 5705 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { 5706 return CP_ACCESS_TRAP_EL3; 5707 } 5708 return CP_ACCESS_OK; 5709 } 5710 5711 static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri, 5712 bool isread) 5713 { 5714 if (arm_is_secure_below_el3(env)) { 5715 /* Access ok in secure mode. 
static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode.  */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env);
}

#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
    REGINFO_SENTINEL
};

static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    Error *err = NULL;
    uint64_t ret;

    /* Success sets NZCV = 0000.  */
    env->NF = env->CF = env->VF = 0, env->ZF = 1;

    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
        /*
         * ??? Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return the
         * timed-out indication to the guest. There is no reason
         * we know to expect this failure to be transitory, so the
         * guest may well hang retrying the operation.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
                      ri->name, error_get_pretty(err));
        error_free(err);

        env->ZF = 0; /* NZCV = 0100 */
        return 0;
    }
    return ret;
}
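/* A note on the flag encoding above (descriptive, not new behaviour): QEMU
 * stores the Z flag inverted, with env->ZF == 0 meaning "Z set". So
 * NF = CF = VF = 0 with ZF = 1 reads back as NZCV = 0000 (success), and
 * clearing ZF on failure reads back as NZCV = 0100, the architected
 * "no valid random number" indication for RNDR/RNDRRS.
 */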
/* We do not support re-seeding, so the two registers operate the same.  */
static const ARMCPRegInfo rndr_reginfo[] = {
    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
      .access = PL0_R, .readfn = rndr_readfn },
    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
      .access = PL0_R, .readfn = rndr_readfn },
    REGINFO_SENTINEL
};
#endif

static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    REGINFO_SENTINEL
};

void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
5891 */ 5892 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 5893 } 5894 5895 if (arm_feature(env, ARM_FEATURE_V6)) { 5896 /* The ID registers all have impdef reset values */ 5897 ARMCPRegInfo v6_idregs[] = { 5898 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 5899 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 5900 .access = PL1_R, .type = ARM_CP_CONST, 5901 .resetvalue = cpu->id_pfr0 }, 5902 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 5903 * the value of the GIC field until after we define these regs. 5904 */ 5905 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 5906 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 5907 .access = PL1_R, .type = ARM_CP_NO_RAW, 5908 .readfn = id_pfr1_read, 5909 .writefn = arm_cp_write_ignore }, 5910 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 5911 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 5912 .access = PL1_R, .type = ARM_CP_CONST, 5913 .resetvalue = cpu->id_dfr0 }, 5914 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 5915 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 5916 .access = PL1_R, .type = ARM_CP_CONST, 5917 .resetvalue = cpu->id_afr0 }, 5918 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 5919 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 5920 .access = PL1_R, .type = ARM_CP_CONST, 5921 .resetvalue = cpu->id_mmfr0 }, 5922 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 5923 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 5924 .access = PL1_R, .type = ARM_CP_CONST, 5925 .resetvalue = cpu->id_mmfr1 }, 5926 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 5927 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 5928 .access = PL1_R, .type = ARM_CP_CONST, 5929 .resetvalue = cpu->id_mmfr2 }, 5930 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 5931 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 5932 .access = PL1_R, .type = ARM_CP_CONST, 5933 .resetvalue = cpu->id_mmfr3 }, 5934 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 5935 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 5936 .access = PL1_R, .type = ARM_CP_CONST, 5937 .resetvalue = cpu->isar.id_isar0 }, 5938 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 5939 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 5940 .access = PL1_R, .type = ARM_CP_CONST, 5941 .resetvalue = cpu->isar.id_isar1 }, 5942 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 5943 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 5944 .access = PL1_R, .type = ARM_CP_CONST, 5945 .resetvalue = cpu->isar.id_isar2 }, 5946 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 5947 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 5948 .access = PL1_R, .type = ARM_CP_CONST, 5949 .resetvalue = cpu->isar.id_isar3 }, 5950 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 5951 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 5952 .access = PL1_R, .type = ARM_CP_CONST, 5953 .resetvalue = cpu->isar.id_isar4 }, 5954 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 5955 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 5956 .access = PL1_R, .type = ARM_CP_CONST, 5957 .resetvalue = cpu->isar.id_isar5 }, 5958 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 5959 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 5960 .access = PL1_R, .type = ARM_CP_CONST, 5961 .resetvalue = cpu->id_mmfr4 }, 5962 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, 5963 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 5964 .access = PL1_R, .type = ARM_CP_CONST, 5965 .resetvalue = cpu->isar.id_isar6 }, 5966 REGINFO_SENTINEL 5967 }; 5968 define_arm_cp_regs(cpu, 
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement four counters in
         * addition to the cycle count register.
         */
        unsigned int i, pmcrn = 4;
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
        for (i = 0; i < pmcrn; i++) {
            char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
            char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
            char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
            char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
            ARMCPRegInfo pmev_regs[] = {
                { .name = pmevcntr_name, .cp = 15, .crn = 14,
                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .raw_readfn = pmevcntr_rawread,
                  .raw_writefn = pmevcntr_rawwrite },
                { .name = pmevtyper_name, .cp = 15, .crn = 14,
                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .raw_writefn = pmevtyper_rawwrite },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, pmev_regs);
            g_free(pmevcntr_name);
            g_free(pmevcntr_el0_name);
            g_free(pmevtyper_name);
            g_free(pmevtyper_el0_name);
        }
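        /* Worked encoding example for the loop above (illustrative only):
         * the architecture packs the counter index n into the opcode as
         * crm = 0b10:n[4:3] and opc2 = n[2:0]. For n == 5 this gives
         * crm = 8 | (3 & (5 >> 3)) == 8 and opc2 = 5 & 7 == 5; for a
         * hypothetical n == 9 it would give crm == 9 and opc2 == 1.
         * PMEVTYPER<n> uses the same scheme with crm based at 12.
         */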
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
        FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
             * know the right value for the GIC field until after we
             * define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr1},
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              /* At present, only SVEver == 0 is defined anyway.  */
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits    = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64ZFR0_EL1" },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits    = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1" },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits    = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1" },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64AFR*",
              .is_glob = true },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
offsetof(CPUARMState, cp15.vmpidr_el2) }, 6329 REGINFO_SENTINEL 6330 }; 6331 define_arm_cp_regs(cpu, vpidr_regs); 6332 define_arm_cp_regs(cpu, el2_cp_reginfo); 6333 if (arm_feature(env, ARM_FEATURE_V8)) { 6334 define_arm_cp_regs(cpu, el2_v8_cp_reginfo); 6335 } 6336 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ 6337 if (!arm_feature(env, ARM_FEATURE_EL3)) { 6338 ARMCPRegInfo rvbar = { 6339 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 6340 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 6341 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar 6342 }; 6343 define_one_arm_cp_reg(cpu, &rvbar); 6344 } 6345 } else { 6346 /* If EL2 is missing but higher ELs are enabled, we need to 6347 * register the no_el2 reginfos. 6348 */ 6349 if (arm_feature(env, ARM_FEATURE_EL3)) { 6350 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 6351 * of MIDR_EL1 and MPIDR_EL1. 6352 */ 6353 ARMCPRegInfo vpidr_regs[] = { 6354 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, 6355 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 6356 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 6357 .type = ARM_CP_CONST, .resetvalue = cpu->midr, 6358 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 6359 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, 6360 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 6361 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 6362 .type = ARM_CP_NO_RAW, 6363 .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, 6364 REGINFO_SENTINEL 6365 }; 6366 define_arm_cp_regs(cpu, vpidr_regs); 6367 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); 6368 if (arm_feature(env, ARM_FEATURE_V8)) { 6369 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo); 6370 } 6371 } 6372 } 6373 if (arm_feature(env, ARM_FEATURE_EL3)) { 6374 define_arm_cp_regs(cpu, el3_cp_reginfo); 6375 ARMCPRegInfo el3_regs[] = { 6376 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 6377 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 6378 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, 6379 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 6380 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 6381 .access = PL3_RW, 6382 .raw_writefn = raw_write, .writefn = sctlr_write, 6383 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 6384 .resetvalue = cpu->reset_sctlr }, 6385 REGINFO_SENTINEL 6386 }; 6387 6388 define_arm_cp_regs(cpu, el3_regs); 6389 } 6390 /* The behaviour of NSACR is sufficiently various that we don't 6391 * try to describe it in a single reginfo: 6392 * if EL3 is 64 bit, then trap to EL3 from S EL1, 6393 * reads as constant 0xc00 from NS EL1 and NS EL2 6394 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 6395 * if v7 without EL3, register doesn't exist 6396 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 6397 */ 6398 if (arm_feature(env, ARM_FEATURE_EL3)) { 6399 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 6400 ARMCPRegInfo nsacr = { 6401 .name = "NSACR", .type = ARM_CP_CONST, 6402 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 6403 .access = PL1_RW, .accessfn = nsacr_access, 6404 .resetvalue = 0xc00 6405 }; 6406 define_one_arm_cp_reg(cpu, &nsacr); 6407 } else { 6408 ARMCPRegInfo nsacr = { 6409 .name = "NSACR", 6410 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 6411 .access = PL3_RW | PL1_R, 6412 .resetvalue = 0, 6413 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 6414 }; 6415 define_one_arm_cp_reg(cpu, &nsacr); 6416 } 6417 } else { 6418 if (arm_feature(env, ARM_FEATURE_V8)) { 
6419 ARMCPRegInfo nsacr = { 6420 .name = "NSACR", .type = ARM_CP_CONST, 6421 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 6422 .access = PL1_R, 6423 .resetvalue = 0xc00 6424 }; 6425 define_one_arm_cp_reg(cpu, &nsacr); 6426 } 6427 } 6428 6429 if (arm_feature(env, ARM_FEATURE_PMSA)) { 6430 if (arm_feature(env, ARM_FEATURE_V6)) { 6431 /* PMSAv6 not implemented */ 6432 assert(arm_feature(env, ARM_FEATURE_V7)); 6433 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 6434 define_arm_cp_regs(cpu, pmsav7_cp_reginfo); 6435 } else { 6436 define_arm_cp_regs(cpu, pmsav5_cp_reginfo); 6437 } 6438 } else { 6439 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 6440 define_arm_cp_regs(cpu, vmsa_cp_reginfo); 6441 /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */ 6442 if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) { 6443 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); 6444 } 6445 } 6446 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 6447 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 6448 } 6449 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 6450 define_arm_cp_regs(cpu, generic_timer_cp_reginfo); 6451 } 6452 if (arm_feature(env, ARM_FEATURE_VAPA)) { 6453 define_arm_cp_regs(cpu, vapa_cp_reginfo); 6454 } 6455 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 6456 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 6457 } 6458 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 6459 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 6460 } 6461 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 6462 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 6463 } 6464 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 6465 define_arm_cp_regs(cpu, omap_cp_reginfo); 6466 } 6467 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 6468 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 6469 } 6470 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 6471 define_arm_cp_regs(cpu, xscale_cp_reginfo); 6472 } 6473 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 6474 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 6475 } 6476 if (arm_feature(env, ARM_FEATURE_LPAE)) { 6477 define_arm_cp_regs(cpu, lpae_cp_reginfo); 6478 } 6479 /* Slightly awkwardly, the OMAP and StrongARM cores need all of 6480 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 6481 * be read-only (ie write causes UNDEF exception). 6482 */ 6483 { 6484 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 6485 /* Pre-v8 MIDR space. 6486 * Note that the MIDR isn't a simple constant register because 6487 * of the TI925 behaviour where writes to another register can 6488 * cause the MIDR value to change. 6489 * 6490 * Unimplemented registers in the c15 0 0 0 space default to 6491 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 6492 * and friends override accordingly. 6493 */ 6494 { .name = "MIDR", 6495 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 6496 .access = PL1_R, .resetvalue = cpu->midr, 6497 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 6498 .readfn = midr_read, 6499 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 6500 .type = ARM_CP_OVERRIDE }, 6501 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ.
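 * (So, for example, an AArch32 "MRC p15, 0, r0, c0, c4, 0" -- crn = 0,
 * crm = 4, opc1 = 0, opc2 = 0 -- hits the DUMMY entry below and reads a
 * constant 0 rather than UNDEFing.)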
*/ 6502 { .name = "DUMMY", 6503 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 6504 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 6505 { .name = "DUMMY", 6506 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 6507 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 6508 { .name = "DUMMY", 6509 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 6510 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 6511 { .name = "DUMMY", 6512 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 6513 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 6514 { .name = "DUMMY", 6515 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 6516 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 6517 REGINFO_SENTINEL 6518 }; 6519 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 6520 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 6521 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 6522 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 6523 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 6524 .readfn = midr_read }, 6525 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 6526 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 6527 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 6528 .access = PL1_R, .resetvalue = cpu->midr }, 6529 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 6530 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 6531 .access = PL1_R, .resetvalue = cpu->midr }, 6532 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 6533 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 6534 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 6535 REGINFO_SENTINEL 6536 }; 6537 ARMCPRegInfo id_cp_reginfo[] = { 6538 /* These are common to v8 and pre-v8 */ 6539 { .name = "CTR", 6540 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 6541 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 6542 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 6543 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 6544 .access = PL0_R, .accessfn = ctr_el0_access, 6545 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 6546 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 6547 { .name = "TCMTR", 6548 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 6549 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 6550 REGINFO_SENTINEL 6551 }; 6552 /* TLBTR is specific to VMSA */ 6553 ARMCPRegInfo id_tlbtr_reginfo = { 6554 .name = "TLBTR", 6555 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 6556 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0, 6557 }; 6558 /* MPUIR is specific to PMSA V6+ */ 6559 ARMCPRegInfo id_mpuir_reginfo = { 6560 .name = "MPUIR", 6561 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 6562 .access = PL1_R, .type = ARM_CP_CONST, 6563 .resetvalue = cpu->pmsav7_dregion << 8 6564 }; 6565 ARMCPRegInfo crn0_wi_reginfo = { 6566 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 6567 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 6568 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 6569 }; 6570 #ifdef CONFIG_USER_ONLY 6571 ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { 6572 { .name = "MIDR_EL1", 6573 .exported_bits = 0x00000000ffffffff }, 6574 { .name = "REVIDR_EL1" }, 6575 REGUSERINFO_SENTINEL 6576 }; 6577 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo); 6578 #endif 6579 if (arm_feature(env, ARM_FEATURE_OMAPCP) || 6580 arm_feature(env, ARM_FEATURE_STRONGARM)) { 6581 ARMCPRegInfo *r; 6582 /* Register the blanket "writes 
ignored" value first to cover the 6583 * whole space. Then update the specific ID registers to allow write 6584 * access, so that they ignore writes rather than causing them to 6585 * UNDEF. 6586 */ 6587 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 6588 for (r = id_pre_v8_midr_cp_reginfo; 6589 r->type != ARM_CP_SENTINEL; r++) { 6590 r->access = PL1_RW; 6591 } 6592 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { 6593 r->access = PL1_RW; 6594 } 6595 id_mpuir_reginfo.access = PL1_RW; 6596 id_tlbtr_reginfo.access = PL1_RW; 6597 } 6598 if (arm_feature(env, ARM_FEATURE_V8)) { 6599 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 6600 } else { 6601 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 6602 } 6603 define_arm_cp_regs(cpu, id_cp_reginfo); 6604 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 6605 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 6606 } else if (arm_feature(env, ARM_FEATURE_V7)) { 6607 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 6608 } 6609 } 6610 6611 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 6612 ARMCPRegInfo mpidr_cp_reginfo[] = { 6613 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, 6614 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 6615 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 6616 REGINFO_SENTINEL 6617 }; 6618 #ifdef CONFIG_USER_ONLY 6619 ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = { 6620 { .name = "MPIDR_EL1", 6621 .fixed_bits = 0x0000000080000000 }, 6622 REGUSERINFO_SENTINEL 6623 }; 6624 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo); 6625 #endif 6626 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 6627 } 6628 6629 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 6630 ARMCPRegInfo auxcr_reginfo[] = { 6631 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 6632 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 6633 .access = PL1_RW, .type = ARM_CP_CONST, 6634 .resetvalue = cpu->reset_auxcr }, 6635 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 6636 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 6637 .access = PL2_RW, .type = ARM_CP_CONST, 6638 .resetvalue = 0 }, 6639 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 6640 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 6641 .access = PL3_RW, .type = ARM_CP_CONST, 6642 .resetvalue = 0 }, 6643 REGINFO_SENTINEL 6644 }; 6645 define_arm_cp_regs(cpu, auxcr_reginfo); 6646 if (arm_feature(env, ARM_FEATURE_V8)) { 6647 /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */ 6648 ARMCPRegInfo hactlr2_reginfo = { 6649 .name = "HACTLR2", .state = ARM_CP_STATE_AA32, 6650 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, 6651 .access = PL2_RW, .type = ARM_CP_CONST, 6652 .resetvalue = 0 6653 }; 6654 define_one_arm_cp_reg(cpu, &hactlr2_reginfo); 6655 } 6656 } 6657 6658 if (arm_feature(env, ARM_FEATURE_CBAR)) { 6659 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 6660 /* 32 bit view is [31:18] 0...0 [43:32]. 
*/ 6661 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 6662 | extract64(cpu->reset_cbar, 32, 12); 6663 ARMCPRegInfo cbar_reginfo[] = { 6664 { .name = "CBAR", 6665 .type = ARM_CP_CONST, 6666 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 6667 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 6668 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 6669 .type = ARM_CP_CONST, 6670 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 6671 .access = PL1_R, .resetvalue = cbar32 }, 6672 REGINFO_SENTINEL 6673 }; 6674 /* We don't implement a r/w 64 bit CBAR currently */ 6675 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 6676 define_arm_cp_regs(cpu, cbar_reginfo); 6677 } else { 6678 ARMCPRegInfo cbar = { 6679 .name = "CBAR", 6680 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 6681 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 6682 .fieldoffset = offsetof(CPUARMState, 6683 cp15.c15_config_base_address) 6684 }; 6685 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 6686 cbar.access = PL1_R; 6687 cbar.fieldoffset = 0; 6688 cbar.type = ARM_CP_CONST; 6689 } 6690 define_one_arm_cp_reg(cpu, &cbar); 6691 } 6692 } 6693 6694 if (arm_feature(env, ARM_FEATURE_VBAR)) { 6695 ARMCPRegInfo vbar_cp_reginfo[] = { 6696 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 6697 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 6698 .access = PL1_RW, .writefn = vbar_write, 6699 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 6700 offsetof(CPUARMState, cp15.vbar_ns) }, 6701 .resetvalue = 0 }, 6702 REGINFO_SENTINEL 6703 }; 6704 define_arm_cp_regs(cpu, vbar_cp_reginfo); 6705 } 6706 6707 /* Generic registers whose values depend on the implementation */ 6708 { 6709 ARMCPRegInfo sctlr = { 6710 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 6711 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 6712 .access = PL1_RW, 6713 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 6714 offsetof(CPUARMState, cp15.sctlr_ns) }, 6715 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 6716 .raw_writefn = raw_write, 6717 }; 6718 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 6719 /* Normally we would always end the TB on an SCTLR write, but Linux 6720 * arch/arm/mach-pxa/sleep.S expects two instructions following 6721 * an MMU enable to execute from cache. Imitate this behaviour. 6722 */ 6723 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 6724 } 6725 define_one_arm_cp_reg(cpu, &sctlr); 6726 } 6727 6728 if (cpu_isar_feature(aa64_lor, cpu)) { 6729 /* 6730 * A trivial implementation of ARMv8.1-LOR leaves all of these 6731 * registers fixed at 0, which indicates that there are zero 6732 * supported Limited Ordering regions. 
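 * (In particular LORID_EL1 reads as zero, so software probing the
 * LORID_EL1.LR/LD region and descriptor counts finds no LORegions and
 * is expected to fall back to ordinary acquire/release ordering.)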
6733 */ 6734 static const ARMCPRegInfo lor_reginfo[] = { 6735 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64, 6736 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0, 6737 .access = PL1_RW, .accessfn = access_lor_other, 6738 .type = ARM_CP_CONST, .resetvalue = 0 }, 6739 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64, 6740 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1, 6741 .access = PL1_RW, .accessfn = access_lor_other, 6742 .type = ARM_CP_CONST, .resetvalue = 0 }, 6743 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64, 6744 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2, 6745 .access = PL1_RW, .accessfn = access_lor_other, 6746 .type = ARM_CP_CONST, .resetvalue = 0 }, 6747 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64, 6748 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3, 6749 .access = PL1_RW, .accessfn = access_lor_other, 6750 .type = ARM_CP_CONST, .resetvalue = 0 }, 6751 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, 6752 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, 6753 .access = PL1_R, .accessfn = access_lorid, 6754 .type = ARM_CP_CONST, .resetvalue = 0 }, 6755 REGINFO_SENTINEL 6756 }; 6757 define_arm_cp_regs(cpu, lor_reginfo); 6758 } 6759 6760 if (cpu_isar_feature(aa64_sve, cpu)) { 6761 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 6762 if (arm_feature(env, ARM_FEATURE_EL2)) { 6763 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 6764 } else { 6765 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 6766 } 6767 if (arm_feature(env, ARM_FEATURE_EL3)) { 6768 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 6769 } 6770 } 6771 6772 #ifdef TARGET_AARCH64 6773 if (cpu_isar_feature(aa64_pauth, cpu)) { 6774 define_arm_cp_regs(cpu, pauth_reginfo); 6775 } 6776 if (cpu_isar_feature(aa64_rndr, cpu)) { 6777 define_arm_cp_regs(cpu, rndr_reginfo); 6778 } 6779 #endif 6780 6781 /* 6782 * While all v8.0 cpus support aarch64, QEMU does have configurations 6783 * that do not set ID_AA64ISAR1, e.g. user-only qemu-arm -cpu max, 6784 * which will set ID_ISAR6. 6785 */ 6786 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) 6787 ? cpu_isar_feature(aa64_predinv, cpu) 6788 : cpu_isar_feature(aa32_predinv, cpu)) { 6789 define_arm_cp_regs(cpu, predinv_reginfo); 6790 } 6791 } 6792 6793 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 6794 { 6795 CPUState *cs = CPU(cpu); 6796 CPUARMState *env = &cpu->env; 6797 6798 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 6799 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, 6800 aarch64_fpu_gdb_set_reg, 6801 34, "aarch64-fpu.xml", 0); 6802 } else if (arm_feature(env, ARM_FEATURE_NEON)) { 6803 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 6804 51, "arm-neon.xml", 0); 6805 } else if (arm_feature(env, ARM_FEATURE_VFP3)) { 6806 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 6807 35, "arm-vfp3.xml", 0); 6808 } else if (arm_feature(env, ARM_FEATURE_VFP)) { 6809 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 6810 19, "arm-vfp.xml", 0); 6811 } 6812 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, 6813 arm_gen_dynamic_xml(cs), 6814 "system-registers.xml", 0); 6815 } 6816 6817 /* Sort alphabetically by type name, except for "any". 
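 * (With TYPE_ARM_CPU being "arm-cpu", this sorts e.g. "cortex-a15-arm-cpu"
 * ahead of "max-arm-cpu" but always places "any-arm-cpu" last.)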
*/ 6818 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 6819 { 6820 ObjectClass *class_a = (ObjectClass *)a; 6821 ObjectClass *class_b = (ObjectClass *)b; 6822 const char *name_a, *name_b; 6823 6824 name_a = object_class_get_name(class_a); 6825 name_b = object_class_get_name(class_b); 6826 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 6827 return 1; 6828 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 6829 return -1; 6830 } else { 6831 return strcmp(name_a, name_b); 6832 } 6833 } 6834 6835 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 6836 { 6837 ObjectClass *oc = data; 6838 const char *typename; 6839 char *name; 6840 6841 typename = object_class_get_name(oc); 6842 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 6843 qemu_printf(" %s\n", name); 6844 g_free(name); 6845 } 6846 6847 void arm_cpu_list(void) 6848 { 6849 GSList *list; 6850 6851 list = object_class_get_list(TYPE_ARM_CPU, false); 6852 list = g_slist_sort(list, arm_cpu_list_compare); 6853 qemu_printf("Available CPUs:\n"); 6854 g_slist_foreach(list, arm_cpu_list_entry, NULL); 6855 g_slist_free(list); 6856 } 6857 6858 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 6859 { 6860 ObjectClass *oc = data; 6861 CpuDefinitionInfoList **cpu_list = user_data; 6862 CpuDefinitionInfoList *entry; 6863 CpuDefinitionInfo *info; 6864 const char *typename; 6865 6866 typename = object_class_get_name(oc); 6867 info = g_malloc0(sizeof(*info)); 6868 info->name = g_strndup(typename, 6869 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 6870 info->q_typename = g_strdup(typename); 6871 6872 entry = g_malloc0(sizeof(*entry)); 6873 entry->value = info; 6874 entry->next = *cpu_list; 6875 *cpu_list = entry; 6876 } 6877 6878 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 6879 { 6880 CpuDefinitionInfoList *cpu_list = NULL; 6881 GSList *list; 6882 6883 list = object_class_get_list(TYPE_ARM_CPU, false); 6884 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 6885 g_slist_free(list); 6886 6887 return cpu_list; 6888 } 6889 6890 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 6891 void *opaque, int state, int secstate, 6892 int crm, int opc1, int opc2, 6893 const char *name) 6894 { 6895 /* Private utility function for define_one_arm_cp_reg_with_opaque(): 6896 * add a single reginfo struct to the hash table. 6897 */ 6898 uint32_t *key = g_new(uint32_t, 1); 6899 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); 6900 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; 6901 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; 6902 6903 r2->name = g_strdup(name); 6904 /* Reset the secure state to the specific incoming state. This is 6905 * necessary as the register may have been defined with both states. 6906 */ 6907 r2->secure = secstate; 6908 6909 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 6910 /* Register is banked (using both entries in array). 6911 * Overwriting fieldoffset as the array is only used to define 6912 * banked registers but later only fieldoffset is used. 6913 */ 6914 r2->fieldoffset = r->bank_fieldoffsets[ns]; 6915 } 6916 6917 if (state == ARM_CP_STATE_AA32) { 6918 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 6919 /* If the register is banked then we don't need to migrate or 6920 * reset the 32-bit instance in certain cases: 6921 * 6922 * 1) If the register has both 32-bit and 64-bit instances then we 6923 * can count on the 64-bit instance taking care of the 6924 * non-secure bank. 
6925 * 2) If ARMv8 is enabled then we can count on a 64-bit version 6926 * taking care of the secure bank. This requires that separate 6927 * 32 and 64-bit definitions are provided. 6928 */ 6929 if ((r->state == ARM_CP_STATE_BOTH && ns) || 6930 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 6931 r2->type |= ARM_CP_ALIAS; 6932 } 6933 } else if ((secstate != r->secure) && !ns) { 6934 /* The register is not banked so we only want to allow migration of 6935 * the non-secure instance. 6936 */ 6937 r2->type |= ARM_CP_ALIAS; 6938 } 6939 6940 if (r->state == ARM_CP_STATE_BOTH) { 6941 /* We assume it is a cp15 register if the .cp field is left unset. 6942 */ 6943 if (r2->cp == 0) { 6944 r2->cp = 15; 6945 } 6946 6947 #ifdef HOST_WORDS_BIGENDIAN 6948 if (r2->fieldoffset) { 6949 r2->fieldoffset += sizeof(uint32_t); 6950 } 6951 #endif 6952 } 6953 } 6954 if (state == ARM_CP_STATE_AA64) { 6955 /* To allow abbreviation of ARMCPRegInfo 6956 * definitions, we treat cp == 0 as equivalent to 6957 * the value for "standard guest-visible sysreg". 6958 * STATE_BOTH definitions are also always "standard 6959 * sysreg" in their AArch64 view (the .cp value may 6960 * be non-zero for the benefit of the AArch32 view). 6961 */ 6962 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { 6963 r2->cp = CP_REG_ARM64_SYSREG_CP; 6964 } 6965 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, 6966 r2->opc0, opc1, opc2); 6967 } else { 6968 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); 6969 } 6970 if (opaque) { 6971 r2->opaque = opaque; 6972 } 6973 /* reginfo passed to helpers is correct for the actual access, 6974 * and is never ARM_CP_STATE_BOTH: 6975 */ 6976 r2->state = state; 6977 /* Make sure reginfo passed to helpers for wildcarded regs 6978 * has the correct crm/opc1/opc2 for this reg, not CP_ANY: 6979 */ 6980 r2->crm = crm; 6981 r2->opc1 = opc1; 6982 r2->opc2 = opc2; 6983 /* By convention, for wildcarded registers only the first 6984 * entry is used for migration; the others are marked as 6985 * ALIAS so we don't try to transfer the register 6986 * multiple times. Special registers (ie NOP/WFI) are 6987 * never migratable and not even raw-accessible. 6988 */ 6989 if ((r->type & ARM_CP_SPECIAL)) { 6990 r2->type |= ARM_CP_NO_RAW; 6991 } 6992 if (((r->crm == CP_ANY) && crm != 0) || 6993 ((r->opc1 == CP_ANY) && opc1 != 0) || 6994 ((r->opc2 == CP_ANY) && opc2 != 0)) { 6995 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 6996 } 6997 6998 /* Check that raw accesses are either forbidden or handled. Note that 6999 * we can't assert this earlier because the setup of fieldoffset for 7000 * banked registers has to be done first. 7001 */ 7002 if (!(r2->type & ARM_CP_NO_RAW)) { 7003 assert(!raw_accessors_invalid(r2)); 7004 } 7005 7006 /* Overriding of an existing definition must be explicitly 7007 * requested. 7008 */ 7009 if (!(r->type & ARM_CP_OVERRIDE)) { 7010 ARMCPRegInfo *oldreg; 7011 oldreg = g_hash_table_lookup(cpu->cp_regs, key); 7012 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { 7013 fprintf(stderr, "Register redefined: cp=%d %d bit " 7014 "crn=%d crm=%d opc1=%d opc2=%d, " 7015 "was %s, now %s\n", r2->cp, 32 + 32 * is64, 7016 r2->crn, r2->crm, r2->opc1, r2->opc2, 7017 oldreg->name, r2->name); 7018 g_assert_not_reached(); 7019 } 7020 } 7021 g_hash_table_insert(cpu->cp_regs, key, r2); 7022 } 7023 7024 7025 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 7026 const ARMCPRegInfo *r, void *opaque) 7027 { 7028 /* Define implementations of coprocessor registers. 
7029 * We store these in a hashtable because typically 7030 * there are less than 150 registers in a space which 7031 * is 16*16*16*8*8 = 262144 in size. 7032 * Wildcarding is supported for the crm, opc1 and opc2 fields. 7033 * If a register is defined twice then the second definition is 7034 * used, so this can be used to define some generic registers and 7035 * then override them with implementation specific variations. 7036 * At least one of the original and the second definition should 7037 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 7038 * against accidental use. 7039 * 7040 * The state field defines whether the register is to be 7041 * visible in the AArch32 or AArch64 execution state. If the 7042 * state is set to ARM_CP_STATE_BOTH then we synthesise a 7043 * reginfo structure for the AArch32 view, which sees the lower 7044 * 32 bits of the 64 bit register. 7045 * 7046 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 7047 * be wildcarded. AArch64 registers are always considered to be 64 7048 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 7049 * the register, if any. 7050 */ 7051 int crm, opc1, opc2, state; 7052 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; 7053 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 7054 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 7055 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 7056 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 7057 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 7058 /* 64 bit registers have only CRm and Opc1 fields */ 7059 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 7060 /* op0 only exists in the AArch64 encodings */ 7061 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 7062 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 7063 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 7064 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 7065 * encodes a minimum access level for the register. We roll this 7066 * runtime check into our general permission check code, so check 7067 * here that the reginfo's specified permissions are strict enough 7068 * to encompass the generic architectural permission check. 7069 */ 7070 if (r->state != ARM_CP_STATE_AA32) { 7071 int mask = 0; 7072 switch (r->opc1) { 7073 case 0: 7074 /* min_EL EL1, but some accessible to EL0 via kernel ABI */ 7075 mask = PL0U_R | PL1_RW; 7076 break; 7077 case 1: case 2: 7078 /* min_EL EL1 */ 7079 mask = PL1_RW; 7080 break; 7081 case 3: 7082 /* min_EL EL0 */ 7083 mask = PL0_RW; 7084 break; 7085 case 4: 7086 /* min_EL EL2 */ 7087 mask = PL2_RW; 7088 break; 7089 case 5: 7090 /* unallocated encoding, so not possible */ 7091 assert(false); 7092 break; 7093 case 6: 7094 /* min_EL EL3 */ 7095 mask = PL3_RW; 7096 break; 7097 case 7: 7098 /* min_EL EL1, secure mode only (we don't check the latter) */ 7099 mask = PL1_RW; 7100 break; 7101 default: 7102 /* broken reginfo with out-of-range opc1 */ 7103 assert(false); 7104 break; 7105 } 7106 /* assert our permissions are not too lax (stricter is fine) */ 7107 assert((r->access & ~mask) == 0); 7108 } 7109 7110 /* Check that the register definition has enough info to handle 7111 * reads and writes if they are permitted. 
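 *
 * As an illustrative sketch (not a register QEMU actually defines), a
 * minimal read-write definition that satisfies these checks needs only
 * a backing field:
 *
 *   static const ARMCPRegInfo example_reginfo = {
 *       .name = "EXAMPLE_EL1", .state = ARM_CP_STATE_AA64,
 *       .opc0 = 3, .opc1 = 0, .crn = 11, .crm = 0, .opc2 = 0,
 *       .access = PL1_RW,
 *       .fieldoffset = offsetof(CPUARMState, cp15.example_el1),
 *   };
 *
 * (cp15.example_el1 is a hypothetical uint64_t field.) The same
 * definition with no fieldoffset, bank_fieldoffsets, readfn or writefn
 * would trip the asserts below.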
7112 */ 7113 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { 7114 if (r->access & PL3_R) { 7115 assert((r->fieldoffset || 7116 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 7117 r->readfn); 7118 } 7119 if (r->access & PL3_W) { 7120 assert((r->fieldoffset || 7121 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 7122 r->writefn); 7123 } 7124 } 7125 /* Bad type field probably means missing sentinel at end of reg list */ 7126 assert(cptype_valid(r->type)); 7127 for (crm = crmmin; crm <= crmmax; crm++) { 7128 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 7129 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 7130 for (state = ARM_CP_STATE_AA32; 7131 state <= ARM_CP_STATE_AA64; state++) { 7132 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 7133 continue; 7134 } 7135 if (state == ARM_CP_STATE_AA32) { 7136 /* Under AArch32 CP registers can be common 7137 * (same for secure and non-secure world) or banked. 7138 */ 7139 char *name; 7140 7141 switch (r->secure) { 7142 case ARM_CP_SECSTATE_S: 7143 case ARM_CP_SECSTATE_NS: 7144 add_cpreg_to_hashtable(cpu, r, opaque, state, 7145 r->secure, crm, opc1, opc2, 7146 r->name); 7147 break; 7148 default: 7149 name = g_strdup_printf("%s_S", r->name); 7150 add_cpreg_to_hashtable(cpu, r, opaque, state, 7151 ARM_CP_SECSTATE_S, 7152 crm, opc1, opc2, name); 7153 g_free(name); 7154 add_cpreg_to_hashtable(cpu, r, opaque, state, 7155 ARM_CP_SECSTATE_NS, 7156 crm, opc1, opc2, r->name); 7157 break; 7158 } 7159 } else { 7160 /* AArch64 registers get mapped to non-secure instance 7161 * of AArch32 */ 7162 add_cpreg_to_hashtable(cpu, r, opaque, state, 7163 ARM_CP_SECSTATE_NS, 7164 crm, opc1, opc2, r->name); 7165 } 7166 } 7167 } 7168 } 7169 } 7170 } 7171 7172 void define_arm_cp_regs_with_opaque(ARMCPU *cpu, 7173 const ARMCPRegInfo *regs, void *opaque) 7174 { 7175 /* Define a whole list of registers */ 7176 const ARMCPRegInfo *r; 7177 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 7178 define_one_arm_cp_reg_with_opaque(cpu, r, opaque); 7179 } 7180 } 7181 7182 /* 7183 * Modify ARMCPRegInfo for access from userspace. 7184 * 7185 * This is a data driven modification directed by 7186 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as 7187 * user-space cannot alter any values and dynamic values pertaining to 7188 * execution state are hidden from user space view anyway. 
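 *
 * A sketch of the mods format (masks invented for illustration; see
 * v8_user_idregs above for real values):
 *
 *   static const ARMCPRegUserSpaceInfo mods[] = {
 *       { .name = "ID_AA64ISAR0_EL1", .exported_bits = 0xf0 },
 *       { .name = "ID_AA64MMFR*_EL1_RESERVED", .is_glob = true },
 *       REGUSERINFO_SENTINEL
 *   };
 *   modify_arm_cp_regs(v8_idregs, mods);
 *
 * An exact-name match keeps only exported_bits of the reset value and
 * ORs in fixed_bits; a glob match forces the value to zero.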
7189 */ 7190 void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods) 7191 { 7192 const ARMCPRegUserSpaceInfo *m; 7193 ARMCPRegInfo *r; 7194 7195 for (m = mods; m->name; m++) { 7196 GPatternSpec *pat = NULL; 7197 if (m->is_glob) { 7198 pat = g_pattern_spec_new(m->name); 7199 } 7200 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 7201 if (pat && g_pattern_match_string(pat, r->name)) { 7202 r->type = ARM_CP_CONST; 7203 r->access = PL0U_R; 7204 r->resetvalue = 0; 7205 /* continue */ 7206 } else if (strcmp(r->name, m->name) == 0) { 7207 r->type = ARM_CP_CONST; 7208 r->access = PL0U_R; 7209 r->resetvalue &= m->exported_bits; 7210 r->resetvalue |= m->fixed_bits; 7211 break; 7212 } 7213 } 7214 if (pat) { 7215 g_pattern_spec_free(pat); 7216 } 7217 } 7218 } 7219 7220 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) 7221 { 7222 return g_hash_table_lookup(cpregs, &encoded_cp); 7223 } 7224 7225 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, 7226 uint64_t value) 7227 { 7228 /* Helper coprocessor write function for write-ignore registers */ 7229 } 7230 7231 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) 7232 { 7233 /* Helper coprocessor read function for read-as-zero registers */ 7234 return 0; 7235 } 7236 7237 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) 7238 { 7239 /* Helper coprocessor reset function for do-nothing-on-reset registers */ 7240 } 7241 7242 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) 7243 { 7244 /* Return true if it is not valid for us to switch to 7245 * this CPU mode (ie all the UNPREDICTABLE cases in 7246 * the ARM ARM CPSRWriteByInstr pseudocode). 7247 */ 7248 7249 /* Changes to or from Hyp via MSR and CPS are illegal. */ 7250 if (write_type == CPSRWriteByInstr && 7251 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || 7252 mode == ARM_CPU_MODE_HYP)) { 7253 return 1; 7254 } 7255 7256 switch (mode) { 7257 case ARM_CPU_MODE_USR: 7258 return 0; 7259 case ARM_CPU_MODE_SYS: 7260 case ARM_CPU_MODE_SVC: 7261 case ARM_CPU_MODE_ABT: 7262 case ARM_CPU_MODE_UND: 7263 case ARM_CPU_MODE_IRQ: 7264 case ARM_CPU_MODE_FIQ: 7265 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7 7266 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) 7267 */ 7268 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR 7269 * and CPS are treated as illegal mode changes.
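 * (For example, with HCR.TGE in force an "MSR CPSR_c, ..." attempting
 * to move from Monitor to SVC returns 1 here, and cpsr_write() then
 * leaves CPSR.M untouched.)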
7270 */ 7271 if (write_type == CPSRWriteByInstr && 7272 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 7273 (arm_hcr_el2_eff(env) & HCR_TGE)) { 7274 return 1; 7275 } 7276 return 0; 7277 case ARM_CPU_MODE_HYP: 7278 return !arm_feature(env, ARM_FEATURE_EL2) 7279 || arm_current_el(env) < 2 || arm_is_secure_below_el3(env); 7280 case ARM_CPU_MODE_MON: 7281 return arm_current_el(env) < 3; 7282 default: 7283 return 1; 7284 } 7285 } 7286 7287 uint32_t cpsr_read(CPUARMState *env) 7288 { 7289 int ZF; 7290 ZF = (env->ZF == 0); 7291 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 7292 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 7293 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 7294 | ((env->condexec_bits & 0xfc) << 8) 7295 | (env->GE << 16) | (env->daif & CPSR_AIF); 7296 } 7297 7298 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 7299 CPSRWriteType write_type) 7300 { 7301 uint32_t changed_daif; 7302 7303 if (mask & CPSR_NZCV) { 7304 env->ZF = (~val) & CPSR_Z; 7305 env->NF = val; 7306 env->CF = (val >> 29) & 1; 7307 env->VF = (val << 3) & 0x80000000; 7308 } 7309 if (mask & CPSR_Q) 7310 env->QF = ((val & CPSR_Q) != 0); 7311 if (mask & CPSR_T) 7312 env->thumb = ((val & CPSR_T) != 0); 7313 if (mask & CPSR_IT_0_1) { 7314 env->condexec_bits &= ~3; 7315 env->condexec_bits |= (val >> 25) & 3; 7316 } 7317 if (mask & CPSR_IT_2_7) { 7318 env->condexec_bits &= 3; 7319 env->condexec_bits |= (val >> 8) & 0xfc; 7320 } 7321 if (mask & CPSR_GE) { 7322 env->GE = (val >> 16) & 0xf; 7323 } 7324 7325 /* In a V7 implementation that includes the security extensions but does 7326 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 7327 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 7328 * bits respectively. 7329 * 7330 * In a V8 implementation, it is permitted for privileged software to 7331 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 7332 */ 7333 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 7334 arm_feature(env, ARM_FEATURE_EL3) && 7335 !arm_feature(env, ARM_FEATURE_EL2) && 7336 !arm_is_secure(env)) { 7337 7338 changed_daif = (env->daif ^ val) & mask; 7339 7340 if (changed_daif & CPSR_A) { 7341 /* Check to see if we are allowed to change the masking of async 7342 * abort exceptions from a non-secure state. 7343 */ 7344 if (!(env->cp15.scr_el3 & SCR_AW)) { 7345 qemu_log_mask(LOG_GUEST_ERROR, 7346 "Ignoring attempt to switch CPSR_A flag from " 7347 "non-secure world with SCR.AW bit clear\n"); 7348 mask &= ~CPSR_A; 7349 } 7350 } 7351 7352 if (changed_daif & CPSR_F) { 7353 /* Check to see if we are allowed to change the masking of FIQ 7354 * exceptions from a non-secure state. 7355 */ 7356 if (!(env->cp15.scr_el3 & SCR_FW)) { 7357 qemu_log_mask(LOG_GUEST_ERROR, 7358 "Ignoring attempt to switch CPSR_F flag from " 7359 "non-secure world with SCR.FW bit clear\n"); 7360 mask &= ~CPSR_F; 7361 } 7362 7363 /* Check whether non-maskable FIQ (NMFI) support is enabled. 7364 * If this bit is set software is not allowed to mask 7365 * FIQs, but is allowed to set CPSR_F to 0. 
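 * (That is: with SCTLR.NMFI set, a write with F = 1 has the F bit
 * stripped from the mask just below, so FIQs stay unmasked, while a
 * write with F = 0 is still honoured.)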
7366 */ 7367 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 7368 (val & CPSR_F)) { 7369 qemu_log_mask(LOG_GUEST_ERROR, 7370 "Ignoring attempt to enable CPSR_F flag " 7371 "(non-maskable FIQ [NMFI] support enabled)\n"); 7372 mask &= ~CPSR_F; 7373 } 7374 } 7375 } 7376 7377 env->daif &= ~(CPSR_AIF & mask); 7378 env->daif |= val & CPSR_AIF & mask; 7379 7380 if (write_type != CPSRWriteRaw && 7381 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 7382 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 7383 /* Note that we can only get here in USR mode if this is a 7384 * gdb stub write; for this case we follow the architectural 7385 * behaviour for guest writes in USR mode of ignoring an attempt 7386 * to switch mode. (Those are caught by translate.c for writes 7387 * triggered by guest instructions.) 7388 */ 7389 mask &= ~CPSR_M; 7390 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 7391 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 7392 * v7, and has defined behaviour in v8: 7393 * + leave CPSR.M untouched 7394 * + allow changes to the other CPSR fields 7395 * + set PSTATE.IL 7396 * For user changes via the GDB stub, we don't set PSTATE.IL, 7397 * as this would be unnecessarily harsh for a user error. 7398 */ 7399 mask &= ~CPSR_M; 7400 if (write_type != CPSRWriteByGDBStub && 7401 arm_feature(env, ARM_FEATURE_V8)) { 7402 mask |= CPSR_IL; 7403 val |= CPSR_IL; 7404 } 7405 qemu_log_mask(LOG_GUEST_ERROR, 7406 "Illegal AArch32 mode switch attempt from %s to %s\n", 7407 aarch32_mode_name(env->uncached_cpsr), 7408 aarch32_mode_name(val)); 7409 } else { 7410 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", 7411 write_type == CPSRWriteExceptionReturn ? 7412 "Exception return from AArch32" : 7413 "AArch32 mode switch from", 7414 aarch32_mode_name(env->uncached_cpsr), 7415 aarch32_mode_name(val), env->regs[15]); 7416 switch_mode(env, val & CPSR_M); 7417 } 7418 } 7419 mask &= ~CACHED_CPSR_BITS; 7420 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 7421 } 7422 7423 /* Sign/zero extend */ 7424 uint32_t HELPER(sxtb16)(uint32_t x) 7425 { 7426 uint32_t res; 7427 res = (uint16_t)(int8_t)x; 7428 res |= (uint32_t)(int8_t)(x >> 16) << 16; 7429 return res; 7430 } 7431 7432 uint32_t HELPER(uxtb16)(uint32_t x) 7433 { 7434 uint32_t res; 7435 res = (uint16_t)(uint8_t)x; 7436 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 7437 return res; 7438 } 7439 7440 int32_t HELPER(sdiv)(int32_t num, int32_t den) 7441 { 7442 if (den == 0) 7443 return 0; 7444 if (num == INT_MIN && den == -1) 7445 return INT_MIN; 7446 return num / den; 7447 } 7448 7449 uint32_t HELPER(udiv)(uint32_t num, uint32_t den) 7450 { 7451 if (den == 0) 7452 return 0; 7453 return num / den; 7454 } 7455 7456 uint32_t HELPER(rbit)(uint32_t x) 7457 { 7458 return revbit32(x); 7459 } 7460 7461 #ifdef CONFIG_USER_ONLY 7462 7463 static void switch_mode(CPUARMState *env, int mode) 7464 { 7465 ARMCPU *cpu = env_archcpu(env); 7466 7467 if (mode != ARM_CPU_MODE_USR) { 7468 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); 7469 } 7470 } 7471 7472 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 7473 uint32_t cur_el, bool secure) 7474 { 7475 return 1; 7476 } 7477 7478 void aarch64_sync_64_to_32(CPUARMState *env) 7479 { 7480 g_assert_not_reached(); 7481 } 7482 7483 #else 7484 7485 static void switch_mode(CPUARMState *env, int mode) 7486 { 7487 int old_mode; 7488 int i; 7489 7490 old_mode = env->uncached_cpsr & CPSR_M; 7491 if (mode == old_mode) 7492 return; 7493 7494 if 
(old_mode == ARM_CPU_MODE_FIQ) { 7495 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); 7496 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); 7497 } else if (mode == ARM_CPU_MODE_FIQ) { 7498 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); 7499 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); 7500 } 7501 7502 i = bank_number(old_mode); 7503 env->banked_r13[i] = env->regs[13]; 7504 env->banked_spsr[i] = env->spsr; 7505 7506 i = bank_number(mode); 7507 env->regs[13] = env->banked_r13[i]; 7508 env->spsr = env->banked_spsr[i]; 7509 7510 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; 7511 env->regs[14] = env->banked_r14[r14_bank_number(mode)]; 7512 } 7513 7514 /* Physical Interrupt Target EL Lookup Table 7515 * 7516 * [ From ARM ARM section G1.13.4 (Table G1-15) ] 7517 * 7518 * The below multi-dimensional table is used for looking up the target 7519 * exception level given numerous condition criteria. Specifically, the 7520 * target EL is based on SCR and HCR routing controls as well as the 7521 * currently executing EL and secure state. 7522 * 7523 * Dimensions: 7524 * target_el_table[2][2][2][2][2][4] 7525 * | | | | | +--- Current EL 7526 * | | | | +------ Non-secure(0)/Secure(1) 7527 * | | | +--------- HCR mask override 7528 * | | +------------ SCR exec state control 7529 * | +--------------- SCR mask override 7530 * +------------------ 32-bit(0)/64-bit(1) EL3 7531 * 7532 * The table values are as such: 7533 * 0-3 = EL0-EL3 7534 * -1 = Cannot occur 7535 * 7536 * The ARM ARM target EL table includes entries indicating that an "exception 7537 * is not taken". The two cases where this is applicable are: 7538 * 1) An exception is taken from EL3 but the SCR does not have the exception 7539 * routed to EL3. 7540 * 2) An exception is taken from EL2 but the HCR does not have the exception 7541 * routed to EL2. 7542 * In these two cases, the below table contains a target of EL1. This value is 7543 * returned as it is expected that the consumer of the table data will check 7544 * for "target EL >= current EL" to ensure the exception is not taken.
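 * As a worked example: a non-secure EL0 IRQ with a 64-bit EL3, SCR.IRQ = 0,
 * SCR.RW = 1 and HCR.IMO = 1 indexes target_el_table[1][0][1][1][0][0],
 * i.e. the "1 0 1 1" row below, and yields a target of EL2.
 *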
7545 * 7546 * SCR HCR 7547 * 64 EA AMO From 7548 * BIT IRQ IMO Non-secure Secure 7549 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 7550 */ 7551 static const int8_t target_el_table[2][2][2][2][2][4] = { 7552 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 7553 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 7554 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 7555 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 7556 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 7557 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 7558 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 7559 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 7560 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 7561 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},}, 7562 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },}, 7563 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},}, 7564 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 7565 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 7566 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 7567 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},}, 7568 }; 7569 7570 /* 7571 * Determine the target EL for physical exceptions 7572 */ 7573 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 7574 uint32_t cur_el, bool secure) 7575 { 7576 CPUARMState *env = cs->env_ptr; 7577 bool rw; 7578 bool scr; 7579 bool hcr; 7580 int target_el; 7581 /* Is the highest EL AArch64? */ 7582 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64); 7583 uint64_t hcr_el2; 7584 7585 if (arm_feature(env, ARM_FEATURE_EL3)) { 7586 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 7587 } else { 7588 /* Either EL2 is the highest EL (and so the EL2 register width 7589 * is given by is64); or there is no EL2 or EL3, in which case 7590 * the value of 'rw' does not affect the table lookup anyway. 
7591 */ 7592 rw = is64; 7593 } 7594 7595 hcr_el2 = arm_hcr_el2_eff(env); 7596 switch (excp_idx) { 7597 case EXCP_IRQ: 7598 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 7599 hcr = hcr_el2 & HCR_IMO; 7600 break; 7601 case EXCP_FIQ: 7602 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 7603 hcr = hcr_el2 & HCR_FMO; 7604 break; 7605 default: 7606 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 7607 hcr = hcr_el2 & HCR_AMO; 7608 break; 7609 } 7610 7611 /* Perform a table-lookup for the target EL given the current state */ 7612 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 7613 7614 assert(target_el > 0); 7615 7616 return target_el; 7617 } 7618 7619 void arm_log_exception(int idx) 7620 { 7621 if (qemu_loglevel_mask(CPU_LOG_INT)) { 7622 const char *exc = NULL; 7623 static const char * const excnames[] = { 7624 [EXCP_UDEF] = "Undefined Instruction", 7625 [EXCP_SWI] = "SVC", 7626 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 7627 [EXCP_DATA_ABORT] = "Data Abort", 7628 [EXCP_IRQ] = "IRQ", 7629 [EXCP_FIQ] = "FIQ", 7630 [EXCP_BKPT] = "Breakpoint", 7631 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 7632 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 7633 [EXCP_HVC] = "Hypervisor Call", 7634 [EXCP_HYP_TRAP] = "Hypervisor Trap", 7635 [EXCP_SMC] = "Secure Monitor Call", 7636 [EXCP_VIRQ] = "Virtual IRQ", 7637 [EXCP_VFIQ] = "Virtual FIQ", 7638 [EXCP_SEMIHOST] = "Semihosting call", 7639 [EXCP_NOCP] = "v7M NOCP UsageFault", 7640 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 7641 [EXCP_STKOF] = "v8M STKOF UsageFault", 7642 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking", 7643 [EXCP_LSERR] = "v8M LSERR UsageFault", 7644 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault", 7645 }; 7646 7647 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 7648 exc = excnames[idx]; 7649 } 7650 if (!exc) { 7651 exc = "unknown"; 7652 } 7653 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); 7654 } 7655 } 7656 7657 /* 7658 * Function used to synchronize QEMU's AArch64 register set with AArch32 7659 * register set. This is necessary when switching between AArch32 and AArch64 7660 * execution state. 7661 */ 7662 void aarch64_sync_32_to_64(CPUARMState *env) 7663 { 7664 int i; 7665 uint32_t mode = env->uncached_cpsr & CPSR_M; 7666 7667 /* We can blanket copy R[0:7] to X[0:7] */ 7668 for (i = 0; i < 8; i++) { 7669 env->xregs[i] = env->regs[i]; 7670 } 7671 7672 /* 7673 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 7674 * Otherwise, they come from the banked user regs. 7675 */ 7676 if (mode == ARM_CPU_MODE_FIQ) { 7677 for (i = 8; i < 13; i++) { 7678 env->xregs[i] = env->usr_regs[i - 8]; 7679 } 7680 } else { 7681 for (i = 8; i < 13; i++) { 7682 env->xregs[i] = env->regs[i]; 7683 } 7684 } 7685 7686 /* 7687 * Registers x13-x23 are the various mode SP and LR registers. Registers 7688 * r13 and r14 are only copied if we are in that mode, otherwise we copy 7689 * from the mode banked register.
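 *
 * Concretely, the fixed mapping implemented below is:
 *   x13 = SP_usr,  x14 = LR_usr,  x15 = SP_hyp,
 *   x16 = LR_irq,  x17 = SP_irq,  x18 = LR_svc,  x19 = SP_svc,
 *   x20 = LR_abt,  x21 = SP_abt,  x22 = LR_und,  x23 = SP_und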
7690 */ 7691 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 7692 env->xregs[13] = env->regs[13]; 7693 env->xregs[14] = env->regs[14]; 7694 } else { 7695 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 7696 /* HYP is an exception in that it is copied from r14 */ 7697 if (mode == ARM_CPU_MODE_HYP) { 7698 env->xregs[14] = env->regs[14]; 7699 } else { 7700 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; 7701 } 7702 } 7703 7704 if (mode == ARM_CPU_MODE_HYP) { 7705 env->xregs[15] = env->regs[13]; 7706 } else { 7707 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 7708 } 7709 7710 if (mode == ARM_CPU_MODE_IRQ) { 7711 env->xregs[16] = env->regs[14]; 7712 env->xregs[17] = env->regs[13]; 7713 } else { 7714 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; 7715 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 7716 } 7717 7718 if (mode == ARM_CPU_MODE_SVC) { 7719 env->xregs[18] = env->regs[14]; 7720 env->xregs[19] = env->regs[13]; 7721 } else { 7722 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; 7723 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 7724 } 7725 7726 if (mode == ARM_CPU_MODE_ABT) { 7727 env->xregs[20] = env->regs[14]; 7728 env->xregs[21] = env->regs[13]; 7729 } else { 7730 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; 7731 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 7732 } 7733 7734 if (mode == ARM_CPU_MODE_UND) { 7735 env->xregs[22] = env->regs[14]; 7736 env->xregs[23] = env->regs[13]; 7737 } else { 7738 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; 7739 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 7740 } 7741 7742 /* 7743 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 7744 * mode, then we can copy from r8-r14. Otherwise, we copy from the 7745 * FIQ bank for r8-r14. 7746 */ 7747 if (mode == ARM_CPU_MODE_FIQ) { 7748 for (i = 24; i < 31; i++) { 7749 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 7750 } 7751 } else { 7752 for (i = 24; i < 29; i++) { 7753 env->xregs[i] = env->fiq_regs[i - 24]; 7754 } 7755 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 7756 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; 7757 } 7758 7759 env->pc = env->regs[15]; 7760 } 7761 7762 /* 7763 * Function used to synchronize QEMU's AArch32 register set with AArch64 7764 * register set. This is necessary when switching between AArch32 and AArch64 7765 * execution state. 7766 */ 7767 void aarch64_sync_64_to_32(CPUARMState *env) 7768 { 7769 int i; 7770 uint32_t mode = env->uncached_cpsr & CPSR_M; 7771 7772 /* We can blanket copy X[0:7] to R[0:7] */ 7773 for (i = 0; i < 8; i++) { 7774 env->regs[i] = env->xregs[i]; 7775 } 7776 7777 /* 7778 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 7779 * Otherwise, we copy x8-x12 into the banked user regs. 7780 */ 7781 if (mode == ARM_CPU_MODE_FIQ) { 7782 for (i = 8; i < 13; i++) { 7783 env->usr_regs[i - 8] = env->xregs[i]; 7784 } 7785 } else { 7786 for (i = 8; i < 13; i++) { 7787 env->regs[i] = env->xregs[i]; 7788 } 7789 } 7790 7791 /* 7792 * Registers r13 & r14 depend on the current mode. 7793 * If we are in a given mode, we copy the corresponding x registers to r13 7794 * and r14. Otherwise, we copy the x register to the banked r13 and r14 7795 * for the mode. 
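 * (This is the exact inverse of the x13-x23 SP/LR mapping used by
 * aarch64_sync_32_to_64() above.)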
7796 */ 7797 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 7798 env->regs[13] = env->xregs[13]; 7799 env->regs[14] = env->xregs[14]; 7800 } else { 7801 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 7802 7803 /* 7804 * HYP is an exception in that it does not have its own banked r14 but 7805 * shares the USR r14 7806 */ 7807 if (mode == ARM_CPU_MODE_HYP) { 7808 env->regs[14] = env->xregs[14]; 7809 } else { 7810 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 7811 } 7812 } 7813 7814 if (mode == ARM_CPU_MODE_HYP) { 7815 env->regs[13] = env->xregs[15]; 7816 } else { 7817 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 7818 } 7819 7820 if (mode == ARM_CPU_MODE_IRQ) { 7821 env->regs[14] = env->xregs[16]; 7822 env->regs[13] = env->xregs[17]; 7823 } else { 7824 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 7825 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 7826 } 7827 7828 if (mode == ARM_CPU_MODE_SVC) { 7829 env->regs[14] = env->xregs[18]; 7830 env->regs[13] = env->xregs[19]; 7831 } else { 7832 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 7833 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 7834 } 7835 7836 if (mode == ARM_CPU_MODE_ABT) { 7837 env->regs[14] = env->xregs[20]; 7838 env->regs[13] = env->xregs[21]; 7839 } else { 7840 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 7841 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 7842 } 7843 7844 if (mode == ARM_CPU_MODE_UND) { 7845 env->regs[14] = env->xregs[22]; 7846 env->regs[13] = env->xregs[23]; 7847 } else { 7848 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 7849 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 7850 } 7851 7852 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 7853 * mode, then we can copy to r8-r14. Otherwise, we copy to the 7854 * FIQ bank for r8-r14. 7855 */ 7856 if (mode == ARM_CPU_MODE_FIQ) { 7857 for (i = 24; i < 31; i++) { 7858 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 7859 } 7860 } else { 7861 for (i = 24; i < 29; i++) { 7862 env->fiq_regs[i - 24] = env->xregs[i]; 7863 } 7864 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 7865 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 7866 } 7867 7868 env->regs[15] = env->pc; 7869 } 7870 7871 static void take_aarch32_exception(CPUARMState *env, int new_mode, 7872 uint32_t mask, uint32_t offset, 7873 uint32_t newpc) 7874 { 7875 /* Change the CPU state so as to actually take the exception. */ 7876 switch_mode(env, new_mode); 7877 /* 7878 * For exceptions taken to AArch32 we must clear the SS bit in both 7879 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 7880 */ 7881 env->uncached_cpsr &= ~PSTATE_SS; 7882 env->spsr = cpsr_read(env); 7883 /* Clear IT bits. */ 7884 env->condexec_bits = 0; 7885 /* Switch to the new mode, and to the correct instruction set. 
*/ 7886 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; 7887 /* Set new mode endianness */ 7888 env->uncached_cpsr &= ~CPSR_E; 7889 if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) { 7890 env->uncached_cpsr |= CPSR_E; 7891 } 7892 /* J and IL must always be cleared for exception entry */ 7893 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J); 7894 env->daif |= mask; 7895 7896 if (new_mode == ARM_CPU_MODE_HYP) { 7897 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0; 7898 env->elr_el[2] = env->regs[15]; 7899 } else { 7900 /* 7901 * Strictly speaking c1_sys did not exist on V4T/V5, so reading 7902 * SCTLR.TE here is an approximation; we simply guard it on V4T. 7903 */ 7904 if (arm_feature(env, ARM_FEATURE_V4T)) { 7905 env->thumb = 7906 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0; 7907 } 7908 env->regs[14] = env->regs[15] + offset; 7909 } 7910 env->regs[15] = newpc; 7911 } 7912 7913 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) 7914 { 7915 /* 7916 * Handle exception entry to Hyp mode; this is sufficiently 7917 * different to entry to other AArch32 modes that we handle it 7918 * separately here. 7919 * 7920 * The vector table entry used is always the 0x14 Hyp mode entry point, 7921 * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp. 7922 * The offset applied to the preferred return address is always zero 7923 * (see DDI0487C.a section G1.12.3). 7924 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values. 7925 */ 7926 uint32_t addr, mask; 7927 ARMCPU *cpu = ARM_CPU(cs); 7928 CPUARMState *env = &cpu->env; 7929 7930 switch (cs->exception_index) { 7931 case EXCP_UDEF: 7932 addr = 0x04; 7933 break; 7934 case EXCP_SWI: 7935 addr = 0x14; 7936 break; 7937 case EXCP_BKPT: 7938 /* Fall through to prefetch abort. */ 7939 case EXCP_PREFETCH_ABORT: 7940 env->cp15.ifar_s = env->exception.vaddress; 7941 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n", 7942 (uint32_t)env->exception.vaddress); 7943 addr = 0x0c; 7944 break; 7945 case EXCP_DATA_ABORT: 7946 env->cp15.dfar_s = env->exception.vaddress; 7947 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n", 7948 (uint32_t)env->exception.vaddress); 7949 addr = 0x10; 7950 break; 7951 case EXCP_IRQ: 7952 addr = 0x18; 7953 break; 7954 case EXCP_FIQ: 7955 addr = 0x1c; 7956 break; 7957 case EXCP_HVC: 7958 addr = 0x08; 7959 break; 7960 case EXCP_HYP_TRAP: 7961 addr = 0x14; 7962 break; 7963 default: 7964 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 7965 } 7966 7967 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { 7968 if (!arm_feature(env, ARM_FEATURE_V8)) { 7969 /* 7970 * QEMU syndrome values are v8-style. v7 has the IL bit 7971 * UNK/SBZP for "field not valid" cases, where v8 uses RES1. 7972 * If this is a v7 CPU, squash the IL bit in those cases.

static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
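
/*
 * Illustrative sketch (not built): the Hyp vector offsets chosen by the
 * switch above, condensed into a table. This is a restatement of the code,
 * not new behaviour.
 */
#if 0
static const uint32_t example_hyp_vector_offset[] = {
    [EXCP_UDEF]           = 0x04,
    [EXCP_HVC]            = 0x08,
    [EXCP_PREFETCH_ABORT] = 0x0c,
    [EXCP_DATA_ABORT]     = 0x10,
    [EXCP_SWI]            = 0x14,
    [EXCP_HYP_TRAP]       = 0x14,
    [EXCP_IRQ]            = 0x18,
    [EXCP_FIQ]            = 0x1c,
};
#endif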

static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors.  When enabled, base address cannot be remapped.  */
        addr += 0xffff0000;
    } else {
        /*
         * ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only honoured in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
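
/*
 * Illustrative sketch (not built) of how the AArch32 vector base is chosen
 * above. Parameter names are ad hoc; 'sctlr_v' stands for the banked
 * SCTLR.V (hivecs) bit.
 */
#if 0
static uint32_t example_a32_vector_base(bool monitor, bool sctlr_v,
                                        uint32_t mvbar, uint32_t vbar)
{
    if (monitor) {
        return mvbar;       /* monitor-mode traps use MVBAR */
    }
    if (sctlr_v) {
        return 0xffff0000;  /* high vectors: fixed, not remappable */
    }
    return vbar;            /* VBAR; only bits [31:5] are significant */
}
#endif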

/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int cur_el = arm_current_el(env);

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /*
         * Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
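
/*
 * Illustrative sketch (not built) of the AArch64 vector offset selection
 * implemented above. 'same_el_spx' means the exception is taken from the
 * same EL with PSTATE.SP set; names are ad hoc.
 */
#if 0
static target_ulong example_a64_vector_offset(bool from_lower_el,
                                              bool lower_is_aa64,
                                              bool same_el_spx)
{
    if (from_lower_el) {
        return lower_is_aa64 ? 0x400 : 0x600;
    }
    return same_el_spx ? 0x200 : 0x0;
    /* IRQ adds a further 0x80 and FIQ 0x100 within the chosen group. */
}
#endif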

static inline bool check_for_semihosting(CPUState *cs)
{
#ifdef CONFIG_TCG
    /*
     * Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /*
             * This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /*
         * Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /*
             * This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
#else
    return false;
#endif
}
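
/*
 * Illustrative sketch (not built): the immediates that
 * check_for_semihosting() above recognises. These are the standard Arm
 * semihosting magic numbers; the helper name is ad hoc.
 */
#if 0
static bool example_is_semihosting_imm(bool thumb, uint32_t imm)
{
    return thumb ? (imm == 0xab)        /* SVC/BKPT 0xAB in Thumb state */
                 : (imm == 0x123456);   /* SVC 0x123456 in Arm state */
}
#endif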

/*
 * Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the
     * code that caused the exception, not the target exception level,
     * so must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    /*
     * Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_USER_ONLY

/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) &&
        (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/*
 * Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}
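
/*
 * Illustrative sketch (not built): stage_1_mmu_idx() above relies on the
 * S12NSE* and S1NSE* index values having the same spacing, so the
 * conversion is a single add. An explicit equivalent, for clarity:
 */
#if 0
static ARMMMUIdx example_stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S12NSE0:
        return ARMMMUIdx_S1NSE0;
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S1NSE1;
    default:
        return mmu_idx;
    }
}
#endif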

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/*
 * Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}

#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}

/*
 * Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env: CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @ap: The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

/*
 * Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap: The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}

/*
 * Translate S2 section/page access permissions to protection flags
 *
 * @env: CPUARMState
 * @s2ap: The 2-bit stage2 access permissions (S2AP)
 * @xn: XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}
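
/*
 * Worked example (not built) for get_S2prot() above, with the EL2
 * register-width test folded into a boolean parameter. S2AP = 0b01
 * (read-only) with XN = 0 yields PAGE_READ | PAGE_EXEC: stage 2 permits
 * execution from any readable page (or unconditionally when EL2 is
 * AArch64).
 */
#if 0
static int example_s2prot(int s2ap, int xn, bool el2_is_aa64)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn && (el2_is_aa64 || (prot & PAGE_READ))) {
        prot |= PAGE_EXEC;
    }
    return prot; /* example_s2prot(1, 0, false) == PAGE_READ | PAGE_EXEC */
}
#endif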

/*
 * Translate section/page access permissions to protection flags
 *
 * @env: CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap: The 2-bit simple AP (AP[2:1])
 * @ns: NS (non-secure) bit
 * @xn: XN (execute-never) bit
 * @pxn: PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /*
     * TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}

static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}

/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};
        ARMCacheAttrs *pcacheattrs = NULL;

        if (env->cp15.hcr_el2 & HCR_PTW) {
            /*
             * PTW means we must fault if this S1 walk touches S2 Device
             * memory; otherwise we don't care about the attributes and can
             * save the S2 translation the effort of computing them.
             */
            pcacheattrs = &cacheattrs;
        }

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
            /* Access was to Device memory: generate Permission fault */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
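
/*
 * Minimal sketch (not built) of the idea behind S1_ptw_translate() above:
 * when stage 2 is live, every descriptor address produced by a stage 1
 * walk is an IPA that must itself be run through the stage 2 translation
 * before the descriptor can be loaded. Both helper names below are
 * hypothetical, for illustration only.
 */
#if 0
static uint64_t example_two_stage_walk_step(hwaddr s1_desc_ipa)
{
    /* Step 1: translate the S1 descriptor's IPA via stage 2 (may fault). */
    hwaddr pa = example_stage2_translate(s1_desc_ipa);
    /* Step 2: only then load the stage 1 descriptor itself. */
    return example_load_descriptor(pa);
}
#endif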

/* All loads done in the course of a page table walk go through here.  */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
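
/*
 * Worked example (not built) of the short-descriptor address split used in
 * get_phys_addr_v5() above: a 1MB section descriptor supplies PA[31:20] and
 * the input VA supplies PA[19:0]. The values below are arbitrary.
 */
#if 0
static void example_v5_section_split(void)
{
    uint32_t desc    = 0x87654c12;   /* L1 section descriptor */
    uint32_t address = 0x00123456;   /* input virtual address */
    uint32_t pa = (desc & 0xfff00000) | (address & 0x000fffff);
    /* pa == 0x87623456: top 12 bits from desc, low 20 bits from the VA */
}
#endif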

static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /*
         * Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
            (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /*
         * The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

/*
 * check_s2_mmu_setup
 * @cpu: ARMCPU
 * @is_aa64: True if the translation regime is in AArch64 state
 * @level: Suggested starting level
 * @inputsize: Bitsize of IPAs
 * @stride: Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}

/*
 * Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
#endif /* !CONFIG_USER_ONLY */
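
/*
 * Worked example (not built) for convert_stage2_attrs() above: S2 attrs
 * 0b1111 (outer and inner write-back) with HCR_EL2.CD clear expands to
 * 0xff, i.e. attribute 0b11 plus the RW-allocate hint 0b11 in each nibble.
 */
#if 0
static void example_stage2_attr_expansion(void)
{
    uint8_t s2attrs = 0xf;
    uint8_t hiattr = (s2attrs >> 2) & 3;   /* 3: outer write-back */
    uint8_t loattr = s2attrs & 3;          /* 3: inner write-back */
    uint8_t mair = (hiattr << 6) | (3 << 4) | (loattr << 2) | 3;  /* 0xff */
}
#endif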

ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
                                        ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    bool tbi, tbid, epd, hpd, using16k, using64k;
    int select, tsz;

    /*
     * Bit 55 is always between the two regions, and is canonical for
     * determining if address tagging is enabled.
     */
    select = extract64(va, 55, 1);

    if (el > 1) {
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* VTCR_EL2 */
            tbi = tbid = hpd = false;
        } else {
            tbi = extract32(tcr, 20, 1);
            hpd = extract32(tcr, 24, 1);
            tbid = extract32(tcr, 29, 1);
        }
        epd = false;
    } else if (!select) {
        tsz = extract32(tcr, 0, 6);
        epd = extract32(tcr, 7, 1);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        tbi = extract64(tcr, 37, 1);
        hpd = extract64(tcr, 41, 1);
        tbid = extract64(tcr, 51, 1);
    } else {
        int tg = extract32(tcr, 30, 2);
        using16k = tg == 1;
        using64k = tg == 3;
        tsz = extract32(tcr, 16, 6);
        epd = extract32(tcr, 23, 1);
        tbi = extract64(tcr, 38, 1);
        hpd = extract64(tcr, 42, 1);
        tbid = extract64(tcr, 52, 1);
    }
    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .tbid = tbid,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);

    /* Present TBI as a composite with TBID.  */
    ret.tbi &= (data || !ret.tbid);
    return ret;
}
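
/*
 * Illustrative sketch (not built): bit 55 of the VA picks the TTBR0/TTBR1
 * region in aa64_va_parameters_both() above, regardless of how many top
 * address bits TBI may later strip. Helper name is ad hoc.
 */
#if 0
static int example_va_region_select(uint64_t va)
{
    return (va >> 55) & 1;  /* 0: TTBR0 (low) region, 1: TTBR1 (high) */
}
#endif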

#ifndef CONFIG_USER_ONLY
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table.  */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /*
     * TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;
        /*
         * If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        ttbr1_valid = (el < 2);
        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        /* There is no TTBR1 for EL2 */
        ttbr1_valid = (el != 2);
        addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
        inputsize = addrsize - param.tsz;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /*
     * Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /*
     * Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /*
         * Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /*
         * The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *   level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         *   = 4 - (inputsize - stride - 3 + stride - 1) / stride
         *   = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
    } else {
        /*
         * For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid.  */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /*
     * The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /*
     * Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /*
             * Table entry.
             * The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /*
         * Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /*
         * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /*
     * Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /*
         * The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        txattrs->target_tlb_bit0 = true;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
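
/*
 * Worked example (not built) for the start-level formula used in
 * get_phys_addr_lpae() above, level = 4 - (inputsize - 4) / stride:
 * with 4KB granules (stride 9) a 39-bit input region gives
 * 4 - 35/9 = 4 - 3 = 1, a three-level walk; a 48-bit region gives
 * 4 - 44/9 = 0, a four-level walk.
 */
#if 0
static int example_lpae_start_level(int inputsize, int stride)
{
    return 4 - (inputsize - 4) / stride;
}
#endif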

static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /*
         * Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /*
     * Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}

static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /*
     * True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
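
/*
 * Illustrative sketch (not built) of the PMSAv7 region arithmetic used in
 * get_phys_addr_pmsav7() below: the DRSR.Rsize field encodes
 * log2(region size) - 1, so the hit test reduces to a mask compare.
 * E.g. a field value of 11 means a 2^12 = 4KB region with mask 0xfff.
 */
#if 0
static bool example_pmsav7_region_hit(uint32_t address, uint32_t base,
                                      uint32_t rsize_field)
{
    uint32_t rsize = rsize_field + 1;        /* log2 of the region size */
    uint32_t rmask = (1ull << rsize) - 1;    /* e.g. 0xfff for 4KB */

    return address >= base && address <= (base + rmask);
}
#endif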

static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /*
         * MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /*
                     * This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}

static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /*
     * The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
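
/*
 * Illustrative sketch (not built) of the SAU region hit test used in
 * v8m_security_lookup() below: RBAR supplies bits [31:5] of the base,
 * RLAR bits [31:5] of the limit, and a region matches when
 * base <= address <= (limit | 0x1f). Helper name is ad hoc.
 */
#if 0
static bool example_sau_region_hit(uint32_t address, uint32_t rbar,
                                   uint32_t rlar)
{
    uint32_t base  = rbar & ~0x1f;
    uint32_t limit = rlar | 0x1f;

    return base <= address && address <= limit;
}
#endif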

static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
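
/*
 * Illustrative sketch only: a hypothetical helper (not called anywhere)
 * spelling out the SAU_RBAR/SAU_RLAR decode used in the loop above.
 * SAU regions are 32-byte aligned, so the base is bits [31:5] of RBAR
 * with the low bits zeroed and the inclusive limit is bits [31:5] of
 * RLAR with the low bits set; RLAR bit 0 is the enable bit and bit 1
 * marks the region as Non-secure-callable.
 */
static inline bool sau_region_decode_example(uint32_t rbar, uint32_t rlar,
                                             uint32_t *base, uint32_t *limit,
                                             bool *nsc)
{
    *base = rbar & ~0x1f;
    *limit = rlar | 0x1f;
    *nsc = (rlar & 2) != 0;
    return (rlar & 1) != 0; /* true if the region is enabled */
}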

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
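
/*
 * Illustrative sketch only: a hypothetical helper restating the
 * is_subpage test from the lookup above. With 4KB target pages, a
 * region [0x20000040, 0x200000ff] hit by an access to 0x20000080 sits
 * inside the page [0x20000000, 0x20000fff] without covering it, so the
 * TLB entry must be restricted to a single byte rather than the page.
 */
static inline bool pmsav8_hit_is_subpage_example(uint32_t base,
                                                 uint32_t limit,
                                                 uint32_t address)
{
    uint32_t page_base = address & TARGET_PAGE_MASK;
    uint32_t page_limit = page_base + (TARGET_PAGE_SIZE - 1);

    /* The matched region is a subpage if it starts after the page
     * begins or ends before the page does.
     */
    return base > page_base || limit < page_limit;
}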

static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}

static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
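
/*
 * Illustrative sketch only: a hypothetical helper showing why the
 * PMSAv5 mask above is built in two steps. The size field (bits [5:1]
 * of the region register) encodes a region of 2^(N + 1) bytes; for the
 * maximal N == 31 (a 4GB region) a single "1 << 32" would be undefined
 * behaviour, whereas shifting an unsigned 0x80000000 left once wraps to
 * 0 and the subtraction then yields the correct 0xffffffff mask.
 */
static inline uint32_t pmsav5_region_mask_example(uint32_t region_reg)
{
    uint32_t mask = 1u << ((region_reg >> 1) & 0x1f);
    return (mask << 1) - 1;
}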

/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}

/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
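
/*
 * Illustrative sketch only: a hypothetical, unused demonstration of the
 * combine rules above. A stage 1 Normal Write-Back Read-Allocate
 * Write-Allocate mapping (attrs 0xff) combined with a stage 2
 * Non-cacheable mapping (attrs 0x44) degrades to Normal Inner/Outer
 * Non-cacheable, which the final fixup then forces to Outer Shareable.
 */
static inline ARMCacheAttrs combine_cacheattrs_example(void)
{
    ARMCacheAttrs s1 = { .attrs = 0xff, .shareability = 0 };
    ARMCacheAttrs s2 = { .attrs = 0x44, .shareability = 0 };

    /* Yields .attrs == 0x44 and .shareability == 2 (Outer Shareable) */
    return combine_cacheattrs(s1, s2);
}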

/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early. */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation. */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms. */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                if (env->cp15.hcr_el2 & HCR_DC) {
                    /*
                     * HCR.DC forces the first stage attributes to
                     *  Normal Non-Shareable,
                     *  Inner Write-Back Read-Allocate Write-Allocate,
                     *  Outer Write-Back Read-Allocate Write-Allocate.
                     */
                    cacheattrs->attrs = 0xff;
                    cacheattrs->shareability = 0;
                }
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return false;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}

#endif

/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}
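
/*
 * Illustrative sketch only: a hypothetical, unused check of the helper
 * above. Overflow is detected when the operands have the same sign but
 * the result's sign differs, and the result then clamps towards the
 * operand's sign.
 */
static inline void add16_sat_example(void)
{
    /* Positive overflow clamps to INT16_MAX... */
    assert(add16_sat(0x7000, 0x2000) == 0x7fff);
    /* ...negative overflow clamps to INT16_MIN... */
    assert(add16_sat(0x9000, 0xe000) == 0x8000);
    /* ...and mixed-sign operands can never overflow. */
    assert(add16_sat(0x7000, 0xe000) == 0x5000);
}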

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"

/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
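
/*
 * Illustrative sketch only: a hypothetical helper mirroring what the
 * SARITH16 expansion above does for the two halfword lanes of a SADD16.
 * Each lane whose signed result is non-negative sets a pair of GE bits;
 * the SEL instruction later uses those bits as a per-byte select mask.
 */
static inline uint32_t sadd16_ge_example(uint32_t a, uint32_t b)
{
    uint32_t ge = 0;
    int32_t lo = (int32_t)(int16_t)a + (int32_t)(int16_t)b;
    int32_t hi = (int32_t)(int16_t)(a >> 16) + (int32_t)(int16_t)(b >> 16);

    if (lo >= 0) {
        ge |= 3 << 0;   /* GE bits for byte lanes 0 and 1 */
    }
    if (hi >= 0) {
        ge |= 3 << 2;   /* GE bits for byte lanes 2 and 3 */
    }
    return ge;
}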

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}

/* CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
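
/*
 * Illustrative sketch only: a hypothetical wrapper (not used by the
 * translator) showing how a full-word CRC step is modelled with the
 * helper above: a 32-bit value is fed in with bytes == 4, and the pre-
 * and post-inversion inside the helper adapts zlib's one's-complement
 * conventions to the accumulator convention the guest instruction uses.
 */
static inline uint32_t crc32w_example(uint32_t acc, uint32_t word)
{
    return helper_crc32(acc, word, 4);
}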

/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    int fpen;

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
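
/*
 * Illustrative sketch only: a hypothetical predicate restating the
 * CPACR.FPEN decode above. FPEN is a 2-bit field at bits [21:20]; EL0
 * FP/SIMD accesses trap for every encoding except 3, while encodings 0
 * and 2 also trap EL1/PL1 accesses.
 */
static inline bool cpacr_fpen_traps_el0_example(uint32_t cpacr)
{
    return extract32(cpacr, 20, 2) != 3;
}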

#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    int el;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    el = arm_current_el(env);
    if (el < 2 && arm_is_secure_below_el3(env)) {
        return ARMMMUIdx_S1SE0 + el;
    } else {
        return ARMMMUIdx_S12NSE0 + el;
    }
}

int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    return arm_to_core_mmu_idx(arm_mmu_idx(env));
}

#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif

void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    int current_el = arm_current_el(env);
    int fp_el = fp_exception_el(env, current_el);
    uint32_t flags = 0;

    if (is_a64(env)) {
        ARMCPU *cpu = env_archcpu(env);
        uint64_t sctlr;

        *pc = env->pc;
        flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);

        /* Get control bits for tagged addresses.  */
        {
            ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
            ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
            int tbii, tbid;

            /* FIXME: ARMv8.1-VHE S2 translation regime.  */
            if (regime_el(env, stage1) < 2) {
                ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
                tbid = (p1.tbi << 1) | p0.tbi;
                tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
            } else {
                tbid = p0.tbi;
                tbii = tbid & !p0.tbid;
            }

            flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
            flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
        }

        if (cpu_isar_feature(aa64_sve, cpu)) {
            int sve_el = sve_exception_el(env, current_el);
            uint32_t zcr_len;

            /* If SVE is disabled, but FP is enabled,
             * then the effective len is 0.
             */
            if (sve_el != 0 && fp_el == 0) {
                zcr_len = 0;
            } else {
                zcr_len = sve_zcr_len_for_el(env, current_el);
            }
            flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
            flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
        }

        sctlr = arm_sctlr(env, current_el);

        if (cpu_isar_feature(aa64_pauth, cpu)) {
            /*
             * In order to save space in flags, we record only whether
             * pauth is "inactive", meaning all insns are implemented as
             * a nop, or "active" when some action must be performed.
             * The decision of which action to take is left to a helper.
             */
            if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
                flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
            }
        }

        if (cpu_isar_feature(aa64_bti, cpu)) {
            /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
            if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
                flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
            }
            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
        }
    } else {
        *pc = env->regs[15];
        flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
        flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
        flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1) || arm_feature(env, ARM_FEATURE_M)) {
            flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
        }
        /* Note that XSCALE_CPAR shares bits with VECSTRIDE */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            flags = FIELD_DP32(flags, TBFLAG_A32,
                               XSCALE_CPAR, env->cp15.c15_cpar);
        }
    }

    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
    }

    /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is
     * suppressing them because the requested execution priority is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_M) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) != env->v7m.secure) {
        flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M) &&
        (env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
        (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
         (env->v7m.secure &&
          !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
        /*
         * ASPEN is set, but FPCA/SFPA indicate that there is no active
         * FP context; we must create a new FP context before executing
         * any FP insn.
         */
        flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;

        if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
            flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
        }
    }

    *pflags = flags;
    *cs_base = 0;
}

#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
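
/*
 * Illustrative sketch only: a hypothetical helper computing the pmask
 * used by the predicate-zapping loop above. Each 128-bit vector
 * quadword owns 16 predicate bits, so a partially-live uint64_t of
 * predicate state keeps 16 * (vq % 4) low bits: e.g. vq == 5 keeps
 * only bits [15:0] of the second word, giving pmask == 0xffff.
 */
static inline uint64_t sve_pred_tail_mask_example(unsigned vq)
{
    return (vq & 3) ? ~(-1ULL << (16 * (vq & 3))) : 0;
}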

/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
#endif