#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"
#include "fpu/softfloat.h"
#include "qemu/range.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
#endif

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
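
/* For the AArch32 helpers above, the gdbstub register numbering is:
 * D0..D15 (or D0..D31 with VFPv3), then 16 Q-register aliases when NEON
 * is present, then FPSID/FPSCR/FPEXC. The AArch64 view below is simpler:
 * V0..V31, then FPSR and FPCR. The return value is the size in bytes of
 * the register transferred, with 0 meaning "not one of ours".
 */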

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
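
/* Writes to system registers via the gdbstub are not supported;
 * returning 0 from the set hook reports the write as not handled.
 */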

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
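
/* The (index,value) list synced by the two functions above is the
 * representation used for migration and for syncing state with KVM:
 * write_cpustate_to_list() is called before state is saved and
 * write_list_to_cpustate() after it is loaded.
 */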

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
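
/* access_tdosa()/access_tdra() above and access_tda()/access_tpm() below
 * all follow the same routing pattern: an MDCR_EL2 trap bit applies only
 * below EL2 and only in Non-secure state, while the corresponding
 * MDCR_EL3 bit applies everywhere below EL3.
 */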

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
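
/* QEMU's TCG TLB is not tagged with ASIDs, so the by-ASID operations
 * above are implemented as full flushes, and the by-MVA operations
 * ignore the ASID field of the written value and just flush the page.
 */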

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}
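
/* The ARMCPRegInfo tables that follow describe coprocessor/system
 * registers by their encoding (cp, opc0/opc1/opc2, crn, crm); CP_ANY
 * fields act as wildcards and each table ends with REGINFO_SENTINEL.
 * .state selects whether a definition provides the AArch32 view, the
 * AArch64 view, or both, and ARM_CP_ALIAS marks a view whose underlying
 * state is reset and migrated via some other definition.
 */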

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
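
/* ARM_CP_OVERRIDE on the CACHEMAINT wildcard above marks it as a
 * placeholder: later, more specific definitions in the same encoding
 * space may replace it without triggering the usual duplicate-definition
 * assertion.
 */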

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    /* Use unsigned 64-bit constants so the mask is not sign-extended */
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
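
/* For example, with PMCR.N = 4 the mask above is 0x8000000f: bit 31
 * selects the cycle counter (the C bit) and bits [3:0] select the four
 * implemented event counters.
 */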

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* User accessibility of the performance monitor registers is
     * controlled by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow
     * configurable trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

#ifndef CONFIG_USER_ONLY

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) {
        return false;
    }

    return true;
}
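
/* The cycle counter state below is delta-encoded: while the counter is
 * enabled, c15_ccnt holds (elapsed_ticks - guest_counter_value), so a
 * read computes elapsed_ticks - c15_ccnt; while it is disabled,
 * c15_ccnt holds the frozen guest counter value directly.
 * pmccntr_sync() converts between the two encodings, and is called in
 * pairs around any change that affects how the counter counts.
 */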

void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; meanwhile,
     * we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

#else /* CONFIG_USER_ONLY */

void pmccntr_sync(CPUARMState *env)
{
}

#endif
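
/* The 0xfc000000 mask in pmccfiltr_write() below keeps the defined
 * filter bits P, U, NSK, NSU, NSH and M (bits [31:26]); the rest of
 * PMCCFILTR_EL0 is RES0.
 */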

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0xfc000000;
    pmccntr_sync(env);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        pmccfiltr_write(env, ri, value);
    }
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
     * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        return env->cp15.pmccfiltr_el0;
    } else {
        return 0;
    }
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     *  For the cases controlled by PMUSERENR we must set .access to PL0_RW
     *  or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
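    /* The three groups of 32-bit TLB invalidate ops below (ITLB, DTLB,
     * unified) share their writefns: QEMU's TLB is not split between
     * instruction and data sides, so they all behave identically here.
     */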
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
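
/* The TPIDR* registers below are the thread ID / TLS registers:
 * TPIDRURW (user RW), TPIDRURO (user RO, privileged RW) and TPIDRPRW
 * (privileged only), with TPIDR_EL0/TPIDRRO_EL0/TPIDR_EL1 as the
 * AArch64 views of the same state.
 */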

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
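
/* The generic timer code below works in units of the emulated counter
 * (QEMU_CLOCK_VIRTUAL nanoseconds divided by GTIMER_SCALE), so tick
 * values must be scaled back up by GTIMER_SCALE when programming the
 * underlying QEMUTimer, which works in nanoseconds.
 */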
1692 */ 1693 switch (arm_current_el(env)) { 1694 case 1: 1695 if (!arm_is_secure(env)) { 1696 return CP_ACCESS_TRAP; 1697 } 1698 if (!(env->cp15.scr_el3 & SCR_ST)) { 1699 return CP_ACCESS_TRAP_EL3; 1700 } 1701 return CP_ACCESS_OK; 1702 case 0: 1703 case 2: 1704 return CP_ACCESS_TRAP; 1705 case 3: 1706 return CP_ACCESS_OK; 1707 default: 1708 g_assert_not_reached(); 1709 } 1710 } 1711 1712 static uint64_t gt_get_countervalue(CPUARMState *env) 1713 { 1714 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE; 1715 } 1716 1717 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 1718 { 1719 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 1720 1721 if (gt->ctl & 1) { 1722 /* Timer enabled: calculate and set current ISTATUS, irq, and 1723 * reset timer to when ISTATUS next has to change 1724 */ 1725 uint64_t offset = timeridx == GTIMER_VIRT ? 1726 cpu->env.cp15.cntvoff_el2 : 0; 1727 uint64_t count = gt_get_countervalue(&cpu->env); 1728 /* Note that this must be unsigned 64 bit arithmetic: */ 1729 int istatus = count - offset >= gt->cval; 1730 uint64_t nexttick; 1731 int irqstate; 1732 1733 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 1734 1735 irqstate = (istatus && !(gt->ctl & 2)); 1736 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 1737 1738 if (istatus) { 1739 /* Next transition is when count rolls back over to zero */ 1740 nexttick = UINT64_MAX; 1741 } else { 1742 /* Next transition is when we hit cval */ 1743 nexttick = gt->cval + offset; 1744 } 1745 /* Note that the desired next expiry time might be beyond the 1746 * signed-64-bit range of a QEMUTimer -- in this case we just 1747 * set the timer for as far in the future as possible. When the 1748 * timer expires we will reset the timer for any remaining period. 1749 */ 1750 if (nexttick > INT64_MAX / GTIMER_SCALE) { 1751 nexttick = INT64_MAX / GTIMER_SCALE; 1752 } 1753 timer_mod(cpu->gt_timer[timeridx], nexttick); 1754 trace_arm_gt_recalc(timeridx, irqstate, nexttick); 1755 } else { 1756 /* Timer disabled: ISTATUS and timer output always clear */ 1757 gt->ctl &= ~4; 1758 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); 1759 timer_del(cpu->gt_timer[timeridx]); 1760 trace_arm_gt_recalc_disabled(timeridx); 1761 } 1762 } 1763 1764 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 1765 int timeridx) 1766 { 1767 ARMCPU *cpu = arm_env_get_cpu(env); 1768 1769 timer_del(cpu->gt_timer[timeridx]); 1770 } 1771 1772 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 1773 { 1774 return gt_get_countervalue(env); 1775 } 1776 1777 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 1778 { 1779 return gt_get_countervalue(env) - env->cp15.cntvoff_el2; 1780 } 1781 1782 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1783 int timeridx, 1784 uint64_t value) 1785 { 1786 trace_arm_gt_cval_write(timeridx, value); 1787 env->cp15.c14_timer[timeridx].cval = value; 1788 gt_recalc_timer(arm_env_get_cpu(env), timeridx); 1789 } 1790 1791 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 1792 int timeridx) 1793 { 1794 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0; 1795 1796 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 1797 (gt_get_countervalue(env) - offset)); 1798 } 1799 1800 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1801 int timeridx, 1802 uint64_t value) 1803 { 1804 uint64_t offset = timeridx == GTIMER_VIRT ? 
env->cp15.cntvoff_el2 : 0; 1805 1806 trace_arm_gt_tval_write(timeridx, value); 1807 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 1808 sextract64(value, 0, 32); 1809 gt_recalc_timer(arm_env_get_cpu(env), timeridx); 1810 } 1811 1812 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1813 int timeridx, 1814 uint64_t value) 1815 { 1816 ARMCPU *cpu = arm_env_get_cpu(env); 1817 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 1818 1819 trace_arm_gt_ctl_write(timeridx, value); 1820 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 1821 if ((oldval ^ value) & 1) { 1822 /* Enable toggled */ 1823 gt_recalc_timer(cpu, timeridx); 1824 } else if ((oldval ^ value) & 2) { 1825 /* IMASK toggled: don't need to recalculate, 1826 * just set the interrupt line based on ISTATUS 1827 */ 1828 int irqstate = (oldval & 4) && !(value & 2); 1829 1830 trace_arm_gt_imask_toggle(timeridx, irqstate); 1831 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 1832 } 1833 } 1834 1835 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1836 { 1837 gt_timer_reset(env, ri, GTIMER_PHYS); 1838 } 1839 1840 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1841 uint64_t value) 1842 { 1843 gt_cval_write(env, ri, GTIMER_PHYS, value); 1844 } 1845 1846 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1847 { 1848 return gt_tval_read(env, ri, GTIMER_PHYS); 1849 } 1850 1851 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1852 uint64_t value) 1853 { 1854 gt_tval_write(env, ri, GTIMER_PHYS, value); 1855 } 1856 1857 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1858 uint64_t value) 1859 { 1860 gt_ctl_write(env, ri, GTIMER_PHYS, value); 1861 } 1862 1863 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1864 { 1865 gt_timer_reset(env, ri, GTIMER_VIRT); 1866 } 1867 1868 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1869 uint64_t value) 1870 { 1871 gt_cval_write(env, ri, GTIMER_VIRT, value); 1872 } 1873 1874 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1875 { 1876 return gt_tval_read(env, ri, GTIMER_VIRT); 1877 } 1878 1879 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1880 uint64_t value) 1881 { 1882 gt_tval_write(env, ri, GTIMER_VIRT, value); 1883 } 1884 1885 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1886 uint64_t value) 1887 { 1888 gt_ctl_write(env, ri, GTIMER_VIRT, value); 1889 } 1890 1891 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 1892 uint64_t value) 1893 { 1894 ARMCPU *cpu = arm_env_get_cpu(env); 1895 1896 trace_arm_gt_cntvoff_write(value); 1897 raw_write(env, ri, value); 1898 gt_recalc_timer(cpu, GTIMER_VIRT); 1899 } 1900 1901 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1902 { 1903 gt_timer_reset(env, ri, GTIMER_HYP); 1904 } 1905 1906 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1907 uint64_t value) 1908 { 1909 gt_cval_write(env, ri, GTIMER_HYP, value); 1910 } 1911 1912 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1913 { 1914 return gt_tval_read(env, ri, GTIMER_HYP); 1915 } 1916 1917 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1918 uint64_t value) 1919 { 1920 gt_tval_write(env, ri, GTIMER_HYP, value); 1921 } 1922 1923 static void gt_hyp_ctl_write(CPUARMState *env, const 
ARMCPRegInfo *ri, 1924 uint64_t value) 1925 { 1926 gt_ctl_write(env, ri, GTIMER_HYP, value); 1927 } 1928 1929 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1930 { 1931 gt_timer_reset(env, ri, GTIMER_SEC); 1932 } 1933 1934 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1935 uint64_t value) 1936 { 1937 gt_cval_write(env, ri, GTIMER_SEC, value); 1938 } 1939 1940 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1941 { 1942 return gt_tval_read(env, ri, GTIMER_SEC); 1943 } 1944 1945 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1946 uint64_t value) 1947 { 1948 gt_tval_write(env, ri, GTIMER_SEC, value); 1949 } 1950 1951 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1952 uint64_t value) 1953 { 1954 gt_ctl_write(env, ri, GTIMER_SEC, value); 1955 } 1956 1957 void arm_gt_ptimer_cb(void *opaque) 1958 { 1959 ARMCPU *cpu = opaque; 1960 1961 gt_recalc_timer(cpu, GTIMER_PHYS); 1962 } 1963 1964 void arm_gt_vtimer_cb(void *opaque) 1965 { 1966 ARMCPU *cpu = opaque; 1967 1968 gt_recalc_timer(cpu, GTIMER_VIRT); 1969 } 1970 1971 void arm_gt_htimer_cb(void *opaque) 1972 { 1973 ARMCPU *cpu = opaque; 1974 1975 gt_recalc_timer(cpu, GTIMER_HYP); 1976 } 1977 1978 void arm_gt_stimer_cb(void *opaque) 1979 { 1980 ARMCPU *cpu = opaque; 1981 1982 gt_recalc_timer(cpu, GTIMER_SEC); 1983 } 1984 1985 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 1986 /* Note that CNTFRQ is purely reads-as-written for the benefit 1987 * of software; writing it doesn't actually change the timer frequency. 1988 * Our reset value matches the fixed frequency we implement the timer at. 1989 */ 1990 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 1991 .type = ARM_CP_ALIAS, 1992 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 1993 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 1994 }, 1995 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 1996 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 1997 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 1998 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 1999 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE, 2000 }, 2001 /* overall control: mostly access permissions */ 2002 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 2003 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 2004 .access = PL1_RW, 2005 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 2006 .resetvalue = 0, 2007 }, 2008 /* per-timer control */ 2009 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2010 .secure = ARM_CP_SECSTATE_NS, 2011 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2012 .accessfn = gt_ptimer_access, 2013 .fieldoffset = offsetoflow32(CPUARMState, 2014 cp15.c14_timer[GTIMER_PHYS].ctl), 2015 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 2016 }, 2017 { .name = "CNTP_CTL_S", 2018 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2019 .secure = ARM_CP_SECSTATE_S, 2020 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2021 .accessfn = gt_ptimer_access, 2022 .fieldoffset = offsetoflow32(CPUARMState, 2023 cp15.c14_timer[GTIMER_SEC].ctl), 2024 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2025 }, 2026 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 2027 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 2028 .type = ARM_CP_IO, .access = PL1_RW | PL0_R, 2029 .accessfn = gt_ptimer_access, 2030 .fieldoffset = offsetof(CPUARMState, 
cp15.c14_timer[GTIMER_PHYS].ctl), 2031 .resetvalue = 0, 2032 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 2033 }, 2034 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 2035 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2036 .accessfn = gt_vtimer_access, 2037 .fieldoffset = offsetoflow32(CPUARMState, 2038 cp15.c14_timer[GTIMER_VIRT].ctl), 2039 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 2040 }, 2041 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 2042 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 2043 .type = ARM_CP_IO, .access = PL1_RW | PL0_R, 2044 .accessfn = gt_vtimer_access, 2045 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 2046 .resetvalue = 0, 2047 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 2048 }, 2049 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 2050 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2051 .secure = ARM_CP_SECSTATE_NS, 2052 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2053 .accessfn = gt_ptimer_access, 2054 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2055 }, 2056 { .name = "CNTP_TVAL_S", 2057 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2058 .secure = ARM_CP_SECSTATE_S, 2059 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2060 .accessfn = gt_ptimer_access, 2061 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 2062 }, 2063 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2064 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 2065 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2066 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 2067 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2068 }, 2069 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 2070 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2071 .accessfn = gt_vtimer_access, 2072 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2073 }, 2074 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2075 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 2076 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2077 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 2078 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2079 }, 2080 /* The counter itself */ 2081 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 2082 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2083 .accessfn = gt_pct_access, 2084 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 2085 }, 2086 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 2087 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 2088 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2089 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 2090 }, 2091 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 2092 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2093 .accessfn = gt_vct_access, 2094 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 2095 }, 2096 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 2097 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 2098 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2099 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 2100 }, 2101 /* Comparison value, indicating when the timer goes off */ 2102 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 2103 .secure = ARM_CP_SECSTATE_NS, 2104 .access = PL1_RW | 
PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else

/* In user-mode most of the generic timer registers are inaccessible;
 * however, modern kernels (4.12+) allow access to cntvct_el0.
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Currently we have no support for QEMUTimer in linux-user, so we
     * can't call gt_get_countervalue(env); instead we directly
     * call the lower-level functions.
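     * The scaling matches the system-emulation view of the counter:
     * assuming the usual GTIMER_SCALE of 16, the nanosecond clock
     * divided by 16 ticks at the 62.5MHz frequency that CNTFRQ_EL0
     * below resets to (e.g. cpu_get_clock() == 160 -> counter == 10).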
2181 */ 2182 return cpu_get_clock() / GTIMER_SCALE; 2183 } 2184 2185 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 2186 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 2187 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 2188 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, 2189 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 2190 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE, 2191 }, 2192 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 2193 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 2194 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2195 .readfn = gt_virt_cnt_read, 2196 }, 2197 REGINFO_SENTINEL 2198 }; 2199 2200 #endif 2201 2202 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2203 { 2204 if (arm_feature(env, ARM_FEATURE_LPAE)) { 2205 raw_write(env, ri, value); 2206 } else if (arm_feature(env, ARM_FEATURE_V7)) { 2207 raw_write(env, ri, value & 0xfffff6ff); 2208 } else { 2209 raw_write(env, ri, value & 0xfffff1ff); 2210 } 2211 } 2212 2213 #ifndef CONFIG_USER_ONLY 2214 /* get_phys_addr() isn't present for user-mode-only targets */ 2215 2216 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 2217 bool isread) 2218 { 2219 if (ri->opc2 & 4) { 2220 /* The ATS12NSO* operations must trap to EL3 if executed in 2221 * Secure EL1 (which can only happen if EL3 is AArch64). 2222 * They are simply UNDEF if executed from NS EL1. 2223 * They function normally from EL2 or EL3. 2224 */ 2225 if (arm_current_el(env) == 1) { 2226 if (arm_is_secure_below_el3(env)) { 2227 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 2228 } 2229 return CP_ACCESS_TRAP_UNCATEGORIZED; 2230 } 2231 } 2232 return CP_ACCESS_OK; 2233 } 2234 2235 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 2236 MMUAccessType access_type, ARMMMUIdx mmu_idx) 2237 { 2238 hwaddr phys_addr; 2239 target_ulong page_size; 2240 int prot; 2241 bool ret; 2242 uint64_t par64; 2243 bool format64 = false; 2244 MemTxAttrs attrs = {}; 2245 ARMMMUFaultInfo fi = {}; 2246 ARMCacheAttrs cacheattrs = {}; 2247 2248 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 2249 &prot, &page_size, &fi, &cacheattrs); 2250 2251 if (is_a64(env)) { 2252 format64 = true; 2253 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 2254 /* 2255 * ATS1Cxx: 2256 * * TTBCR.EAE determines whether the result is returned using the 2257 * 32-bit or the 64-bit PAR format 2258 * * Instructions executed in Hyp mode always use the 64bit format 2259 * 2260 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 2261 * * The Non-secure TTBCR.EAE bit is set to 1 2262 * * The implementation includes EL2, and the value of HCR.VM is 1 2263 * 2264 * ATS1Hx always uses the 64bit format (not supported yet). 
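     * So, for example, an ATS1CPR executed in AArch32 EL1 reports its
     * result in the 64-bit PAR format if TTBCR.EAE == 1 and in the
     * 32-bit format if TTBCR.EAE == 0; that is the distinction the
     * arm_s1_regime_using_lpae_format() call below makes.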
2265 */ 2266 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 2267 2268 if (arm_feature(env, ARM_FEATURE_EL2)) { 2269 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 2270 format64 |= env->cp15.hcr_el2 & HCR_VM; 2271 } else { 2272 format64 |= arm_current_el(env) == 2; 2273 } 2274 } 2275 } 2276 2277 if (format64) { 2278 /* Create a 64-bit PAR */ 2279 par64 = (1 << 11); /* LPAE bit always set */ 2280 if (!ret) { 2281 par64 |= phys_addr & ~0xfffULL; 2282 if (!attrs.secure) { 2283 par64 |= (1 << 9); /* NS */ 2284 } 2285 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 2286 par64 |= cacheattrs.shareability << 7; /* SH */ 2287 } else { 2288 uint32_t fsr = arm_fi_to_lfsc(&fi); 2289 2290 par64 |= 1; /* F */ 2291 par64 |= (fsr & 0x3f) << 1; /* FS */ 2292 /* Note that S2WLK and FSTAGE are always zero, because we don't 2293 * implement virtualization and therefore there can't be a stage 2 2294 * fault. 2295 */ 2296 } 2297 } else { 2298 /* fsr is a DFSR/IFSR value for the short descriptor 2299 * translation table format (with WnR always clear). 2300 * Convert it to a 32-bit PAR. 2301 */ 2302 if (!ret) { 2303 /* We do not set any attribute bits in the PAR */ 2304 if (page_size == (1 << 24) 2305 && arm_feature(env, ARM_FEATURE_V7)) { 2306 par64 = (phys_addr & 0xff000000) | (1 << 1); 2307 } else { 2308 par64 = phys_addr & 0xfffff000; 2309 } 2310 if (!attrs.secure) { 2311 par64 |= (1 << 9); /* NS */ 2312 } 2313 } else { 2314 uint32_t fsr = arm_fi_to_sfsc(&fi); 2315 2316 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 2317 ((fsr & 0xf) << 1) | 1; 2318 } 2319 } 2320 return par64; 2321 } 2322 2323 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2324 { 2325 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 2326 uint64_t par64; 2327 ARMMMUIdx mmu_idx; 2328 int el = arm_current_el(env); 2329 bool secure = arm_is_secure_below_el3(env); 2330 2331 switch (ri->opc2 & 6) { 2332 case 0: 2333 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */ 2334 switch (el) { 2335 case 3: 2336 mmu_idx = ARMMMUIdx_S1E3; 2337 break; 2338 case 2: 2339 mmu_idx = ARMMMUIdx_S1NSE1; 2340 break; 2341 case 1: 2342 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 2343 break; 2344 default: 2345 g_assert_not_reached(); 2346 } 2347 break; 2348 case 2: 2349 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 2350 switch (el) { 2351 case 3: 2352 mmu_idx = ARMMMUIdx_S1SE0; 2353 break; 2354 case 2: 2355 mmu_idx = ARMMMUIdx_S1NSE0; 2356 break; 2357 case 1: 2358 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 2359 break; 2360 default: 2361 g_assert_not_reached(); 2362 } 2363 break; 2364 case 4: 2365 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 2366 mmu_idx = ARMMMUIdx_S12NSE1; 2367 break; 2368 case 6: 2369 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 2370 mmu_idx = ARMMMUIdx_S12NSE0; 2371 break; 2372 default: 2373 g_assert_not_reached(); 2374 } 2375 2376 par64 = do_ats_write(env, value, access_type, mmu_idx); 2377 2378 A32_BANKED_CURRENT_REG_SET(env, par, par64); 2379 } 2380 2381 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 2382 uint64_t value) 2383 { 2384 MMUAccessType access_type = ri->opc2 & 1 ? 
MMU_DATA_STORE : MMU_DATA_LOAD; 2385 uint64_t par64; 2386 2387 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS); 2388 2389 A32_BANKED_CURRENT_REG_SET(env, par, par64); 2390 } 2391 2392 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 2393 bool isread) 2394 { 2395 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { 2396 return CP_ACCESS_TRAP; 2397 } 2398 return CP_ACCESS_OK; 2399 } 2400 2401 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 2402 uint64_t value) 2403 { 2404 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 2405 ARMMMUIdx mmu_idx; 2406 int secure = arm_is_secure_below_el3(env); 2407 2408 switch (ri->opc2 & 6) { 2409 case 0: 2410 switch (ri->opc1) { 2411 case 0: /* AT S1E1R, AT S1E1W */ 2412 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 2413 break; 2414 case 4: /* AT S1E2R, AT S1E2W */ 2415 mmu_idx = ARMMMUIdx_S1E2; 2416 break; 2417 case 6: /* AT S1E3R, AT S1E3W */ 2418 mmu_idx = ARMMMUIdx_S1E3; 2419 break; 2420 default: 2421 g_assert_not_reached(); 2422 } 2423 break; 2424 case 2: /* AT S1E0R, AT S1E0W */ 2425 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 2426 break; 2427 case 4: /* AT S12E1R, AT S12E1W */ 2428 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1; 2429 break; 2430 case 6: /* AT S12E0R, AT S12E0W */ 2431 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0; 2432 break; 2433 default: 2434 g_assert_not_reached(); 2435 } 2436 2437 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 2438 } 2439 #endif 2440 2441 static const ARMCPRegInfo vapa_cp_reginfo[] = { 2442 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 2443 .access = PL1_RW, .resetvalue = 0, 2444 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 2445 offsetoflow32(CPUARMState, cp15.par_ns) }, 2446 .writefn = par_write }, 2447 #ifndef CONFIG_USER_ONLY 2448 /* This underdecoding is safe because the reginfo is NO_RAW. */ 2449 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 2450 .access = PL1_W, .accessfn = ats_access, 2451 .writefn = ats_write, .type = ARM_CP_NO_RAW }, 2452 #endif 2453 REGINFO_SENTINEL 2454 }; 2455 2456 /* Return basic MPU access permission bits. */ 2457 static uint32_t simple_mpu_ap_bits(uint32_t val) 2458 { 2459 uint32_t ret; 2460 uint32_t mask; 2461 int i; 2462 ret = 0; 2463 mask = 3; 2464 for (i = 0; i < 16; i += 2) { 2465 ret |= (val >> i) & mask; 2466 mask <<= 2; 2467 } 2468 return ret; 2469 } 2470 2471 /* Pad basic MPU access permission bits to extended format. 
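 * Each 2-bit AP field for region n moves from bits [2n+1:2n] of the
 * simple format to bits [4n+1:4n] of the extended format; for example
 * extended_mpu_ap_bits(0xe5) == 0x3211, and simple_mpu_ap_bits()
 * above is the inverse mapping.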
 */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}

static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}

static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}

static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
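     * Note that the fieldoffsets for DRBAR/DRSR/DRACR below point at
     * the dynamically allocated region arrays, which pmsav7_read() and
     * pmsav7_write() then index by the current region number; a DRBAR
     * read, for example, returns
     * env->pmsav7.drbar[env->pmsav7.rnr[M_REG_NS]].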
2555 */ 2556 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 2557 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2558 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 2559 .readfn = pmsav7_read, .writefn = pmsav7_write, 2560 .resetfn = arm_cp_reset_ignore }, 2561 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 2562 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2563 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 2564 .readfn = pmsav7_read, .writefn = pmsav7_write, 2565 .resetfn = arm_cp_reset_ignore }, 2566 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 2567 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2568 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 2569 .readfn = pmsav7_read, .writefn = pmsav7_write, 2570 .resetfn = arm_cp_reset_ignore }, 2571 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 2572 .access = PL1_RW, 2573 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 2574 .writefn = pmsav7_rgnr_write, 2575 .resetfn = arm_cp_reset_ignore }, 2576 REGINFO_SENTINEL 2577 }; 2578 2579 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 2580 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 2581 .access = PL1_RW, .type = ARM_CP_ALIAS, 2582 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 2583 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 2584 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 2585 .access = PL1_RW, .type = ARM_CP_ALIAS, 2586 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 2587 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 2588 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 2589 .access = PL1_RW, 2590 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 2591 .resetvalue = 0, }, 2592 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 2593 .access = PL1_RW, 2594 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 2595 .resetvalue = 0, }, 2596 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 2597 .access = PL1_RW, 2598 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 2599 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 2600 .access = PL1_RW, 2601 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 2602 /* Protection region base and size registers */ 2603 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 2604 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2605 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 2606 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 2607 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2608 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 2609 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 2610 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2611 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 2612 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 2613 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2614 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 2615 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 2616 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2617 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 2618 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 2619 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2620 .fieldoffset 
= offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};

static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}

static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* 64 bit accesses to the TTBRs can change the ASID and so we
     * must flush the TLB.
     */
    if (cpreg_field_is_64bit(ri)) {
        ARMCPU *cpu = arm_env_get_cpu(env);

        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB.
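     * We do not tag TLB entries with the VMID, so e.g. a hypervisor
     * switching guests by rewriting VTTBR must lose the stage 2 index
     * as well as both combined stage 1+2 indexes, which is the flush
     * done below (skipped when the value is unchanged).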
*/ 2721 if (raw_read(env, ri) != value) { 2722 tlb_flush_by_mmuidx(cs, 2723 ARMMMUIdxBit_S12NSE1 | 2724 ARMMMUIdxBit_S12NSE0 | 2725 ARMMMUIdxBit_S2NS); 2726 raw_write(env, ri, value); 2727 } 2728 } 2729 2730 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 2731 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 2732 .access = PL1_RW, .type = ARM_CP_ALIAS, 2733 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 2734 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 2735 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 2736 .access = PL1_RW, .resetvalue = 0, 2737 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 2738 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 2739 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 2740 .access = PL1_RW, .resetvalue = 0, 2741 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 2742 offsetof(CPUARMState, cp15.dfar_ns) } }, 2743 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 2744 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 2745 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 2746 .resetvalue = 0, }, 2747 REGINFO_SENTINEL 2748 }; 2749 2750 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 2751 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 2752 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 2753 .access = PL1_RW, 2754 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 2755 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 2756 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 2757 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 2758 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 2759 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 2760 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 2761 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 2762 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 2763 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 2764 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 2765 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 2766 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 2767 .access = PL1_RW, .writefn = vmsa_tcr_el1_write, 2768 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 2769 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 2770 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 2771 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 2772 .raw_writefn = vmsa_ttbcr_raw_write, 2773 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), 2774 offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, 2775 REGINFO_SENTINEL 2776 }; 2777 2778 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 2779 uint64_t value) 2780 { 2781 env->cp15.c15_ticonfig = value & 0xe7; 2782 /* The OS_TYPE bit in this register changes the reported CPUID! */ 2783 env->cp15.c0_cpuid = (value & (1 << 5)) ? 
2784 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 2785 } 2786 2787 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 2788 uint64_t value) 2789 { 2790 env->cp15.c15_threadid = value & 0xffff; 2791 } 2792 2793 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 2794 uint64_t value) 2795 { 2796 /* Wait-for-interrupt (deprecated) */ 2797 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT); 2798 } 2799 2800 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 2801 uint64_t value) 2802 { 2803 /* On OMAP there are registers indicating the max/min index of dcache lines 2804 * containing a dirty line; cache flush operations have to reset these. 2805 */ 2806 env->cp15.c15_i_max = 0x000; 2807 env->cp15.c15_i_min = 0xff0; 2808 } 2809 2810 static const ARMCPRegInfo omap_cp_reginfo[] = { 2811 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 2812 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 2813 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 2814 .resetvalue = 0, }, 2815 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 2816 .access = PL1_RW, .type = ARM_CP_NOP }, 2817 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 2818 .access = PL1_RW, 2819 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 2820 .writefn = omap_ticonfig_write }, 2821 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 2822 .access = PL1_RW, 2823 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 2824 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 2825 .access = PL1_RW, .resetvalue = 0xff0, 2826 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 2827 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 2828 .access = PL1_RW, 2829 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 2830 .writefn = omap_threadid_write }, 2831 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 2832 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 2833 .type = ARM_CP_NO_RAW, 2834 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 2835 /* TODO: Peripheral port remap register: 2836 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 2837 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 2838 * when MMU is off. 
2839 */ 2840 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 2841 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 2842 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 2843 .writefn = omap_cachemaint_write }, 2844 { .name = "C9", .cp = 15, .crn = 9, 2845 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 2846 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 2847 REGINFO_SENTINEL 2848 }; 2849 2850 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 2851 uint64_t value) 2852 { 2853 env->cp15.c15_cpar = value & 0x3fff; 2854 } 2855 2856 static const ARMCPRegInfo xscale_cp_reginfo[] = { 2857 { .name = "XSCALE_CPAR", 2858 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 2859 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 2860 .writefn = xscale_cpar_write, }, 2861 { .name = "XSCALE_AUXCR", 2862 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 2863 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 2864 .resetvalue = 0, }, 2865 /* XScale specific cache-lockdown: since we have no cache we NOP these 2866 * and hope the guest does not really rely on cache behaviour. 2867 */ 2868 { .name = "XSCALE_LOCK_ICACHE_LINE", 2869 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 2870 .access = PL1_W, .type = ARM_CP_NOP }, 2871 { .name = "XSCALE_UNLOCK_ICACHE", 2872 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 2873 .access = PL1_W, .type = ARM_CP_NOP }, 2874 { .name = "XSCALE_DCACHE_LOCK", 2875 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 2876 .access = PL1_RW, .type = ARM_CP_NOP }, 2877 { .name = "XSCALE_UNLOCK_DCACHE", 2878 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 2879 .access = PL1_W, .type = ARM_CP_NOP }, 2880 REGINFO_SENTINEL 2881 }; 2882 2883 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 2884 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 2885 * implementation of this implementation-defined space. 2886 * Ideally this should eventually disappear in favour of actually 2887 * implementing the correct behaviour for all cores. 
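     * With this reginfo the whole space is RAZ/WI: for example,
     * "mcr p15, 0, r0, c15, c0, 0" is write-ignored and the
     * corresponding mrc returns 0.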
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
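         * For example, an SMP v7MP core with mp_affinity 1 reads
         * MPIDR as 0x80000001, while a uniprocessor Cortex-R5 with
         * affinity 0 reads 0xc0000000.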
2970 */ 2971 if (cpu->mp_is_up) { 2972 mpidr |= (1u << 30); 2973 } 2974 } 2975 return mpidr; 2976 } 2977 2978 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2979 { 2980 unsigned int cur_el = arm_current_el(env); 2981 bool secure = arm_is_secure(env); 2982 2983 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 2984 return env->cp15.vmpidr_el2; 2985 } 2986 return mpidr_read_val(env); 2987 } 2988 2989 static const ARMCPRegInfo mpidr_cp_reginfo[] = { 2990 { .name = "MPIDR", .state = ARM_CP_STATE_BOTH, 2991 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 2992 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 2993 REGINFO_SENTINEL 2994 }; 2995 2996 static const ARMCPRegInfo lpae_cp_reginfo[] = { 2997 /* NOP AMAIR0/1 */ 2998 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 2999 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 3000 .access = PL1_RW, .type = ARM_CP_CONST, 3001 .resetvalue = 0 }, 3002 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 3003 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 3004 .access = PL1_RW, .type = ARM_CP_CONST, 3005 .resetvalue = 0 }, 3006 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 3007 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 3008 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 3009 offsetof(CPUARMState, cp15.par_ns)} }, 3010 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 3011 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3012 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 3013 offsetof(CPUARMState, cp15.ttbr0_ns) }, 3014 .writefn = vmsa_ttbr_write, }, 3015 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 3016 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3017 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 3018 offsetof(CPUARMState, cp15.ttbr1_ns) }, 3019 .writefn = vmsa_ttbr_write, }, 3020 REGINFO_SENTINEL 3021 }; 3022 3023 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3024 { 3025 return vfp_get_fpcr(env); 3026 } 3027 3028 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3029 uint64_t value) 3030 { 3031 vfp_set_fpcr(env, value); 3032 } 3033 3034 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3035 { 3036 return vfp_get_fpsr(env); 3037 } 3038 3039 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3040 uint64_t value) 3041 { 3042 vfp_set_fpsr(env, value); 3043 } 3044 3045 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 3046 bool isread) 3047 { 3048 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) { 3049 return CP_ACCESS_TRAP; 3050 } 3051 return CP_ACCESS_OK; 3052 } 3053 3054 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 3055 uint64_t value) 3056 { 3057 env->daif = value & PSTATE_DAIF; 3058 } 3059 3060 static CPAccessResult aa64_cacheop_access(CPUARMState *env, 3061 const ARMCPRegInfo *ri, 3062 bool isread) 3063 { 3064 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless 3065 * SCTLR_EL1.UCI is set. 
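     * For example, "dc cvau, x0" executed at EL0 with
     * SCTLR_EL1.UCI == 0 takes an exception to EL1; with UCI == 1 it
     * is accepted (and then NOPs, since we don't model caches).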
3066 */ 3067 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) { 3068 return CP_ACCESS_TRAP; 3069 } 3070 return CP_ACCESS_OK; 3071 } 3072 3073 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 3074 * Page D4-1736 (DDI0487A.b) 3075 */ 3076 3077 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3078 uint64_t value) 3079 { 3080 CPUState *cs = ENV_GET_CPU(env); 3081 3082 if (arm_is_secure_below_el3(env)) { 3083 tlb_flush_by_mmuidx(cs, 3084 ARMMMUIdxBit_S1SE1 | 3085 ARMMMUIdxBit_S1SE0); 3086 } else { 3087 tlb_flush_by_mmuidx(cs, 3088 ARMMMUIdxBit_S12NSE1 | 3089 ARMMMUIdxBit_S12NSE0); 3090 } 3091 } 3092 3093 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3094 uint64_t value) 3095 { 3096 CPUState *cs = ENV_GET_CPU(env); 3097 bool sec = arm_is_secure_below_el3(env); 3098 3099 if (sec) { 3100 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3101 ARMMMUIdxBit_S1SE1 | 3102 ARMMMUIdxBit_S1SE0); 3103 } else { 3104 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3105 ARMMMUIdxBit_S12NSE1 | 3106 ARMMMUIdxBit_S12NSE0); 3107 } 3108 } 3109 3110 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3111 uint64_t value) 3112 { 3113 /* Note that the 'ALL' scope must invalidate both stage 1 and 3114 * stage 2 translations, whereas most other scopes only invalidate 3115 * stage 1 translations. 3116 */ 3117 ARMCPU *cpu = arm_env_get_cpu(env); 3118 CPUState *cs = CPU(cpu); 3119 3120 if (arm_is_secure_below_el3(env)) { 3121 tlb_flush_by_mmuidx(cs, 3122 ARMMMUIdxBit_S1SE1 | 3123 ARMMMUIdxBit_S1SE0); 3124 } else { 3125 if (arm_feature(env, ARM_FEATURE_EL2)) { 3126 tlb_flush_by_mmuidx(cs, 3127 ARMMMUIdxBit_S12NSE1 | 3128 ARMMMUIdxBit_S12NSE0 | 3129 ARMMMUIdxBit_S2NS); 3130 } else { 3131 tlb_flush_by_mmuidx(cs, 3132 ARMMMUIdxBit_S12NSE1 | 3133 ARMMMUIdxBit_S12NSE0); 3134 } 3135 } 3136 } 3137 3138 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3139 uint64_t value) 3140 { 3141 ARMCPU *cpu = arm_env_get_cpu(env); 3142 CPUState *cs = CPU(cpu); 3143 3144 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2); 3145 } 3146 3147 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3148 uint64_t value) 3149 { 3150 ARMCPU *cpu = arm_env_get_cpu(env); 3151 CPUState *cs = CPU(cpu); 3152 3153 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3); 3154 } 3155 3156 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3157 uint64_t value) 3158 { 3159 /* Note that the 'ALL' scope must invalidate both stage 1 and 3160 * stage 2 translations, whereas most other scopes only invalidate 3161 * stage 1 translations. 
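     * For example, TLBI ALLE1IS on a CPU with EL2 must also drop the
     * ARMMMUIdxBit_S2NS index, whereas TLBI VMALLE1IS above flushes
     * only the combined stage 1+2 indexes.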
3162 */ 3163 CPUState *cs = ENV_GET_CPU(env); 3164 bool sec = arm_is_secure_below_el3(env); 3165 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2); 3166 3167 if (sec) { 3168 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3169 ARMMMUIdxBit_S1SE1 | 3170 ARMMMUIdxBit_S1SE0); 3171 } else if (has_el2) { 3172 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3173 ARMMMUIdxBit_S12NSE1 | 3174 ARMMMUIdxBit_S12NSE0 | 3175 ARMMMUIdxBit_S2NS); 3176 } else { 3177 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3178 ARMMMUIdxBit_S12NSE1 | 3179 ARMMMUIdxBit_S12NSE0); 3180 } 3181 } 3182 3183 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3184 uint64_t value) 3185 { 3186 CPUState *cs = ENV_GET_CPU(env); 3187 3188 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2); 3189 } 3190 3191 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3192 uint64_t value) 3193 { 3194 CPUState *cs = ENV_GET_CPU(env); 3195 3196 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3); 3197 } 3198 3199 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3200 uint64_t value) 3201 { 3202 /* Invalidate by VA, EL1&0 (AArch64 version). 3203 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 3204 * since we don't support flush-for-specific-ASID-only or 3205 * flush-last-level-only. 3206 */ 3207 ARMCPU *cpu = arm_env_get_cpu(env); 3208 CPUState *cs = CPU(cpu); 3209 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3210 3211 if (arm_is_secure_below_el3(env)) { 3212 tlb_flush_page_by_mmuidx(cs, pageaddr, 3213 ARMMMUIdxBit_S1SE1 | 3214 ARMMMUIdxBit_S1SE0); 3215 } else { 3216 tlb_flush_page_by_mmuidx(cs, pageaddr, 3217 ARMMMUIdxBit_S12NSE1 | 3218 ARMMMUIdxBit_S12NSE0); 3219 } 3220 } 3221 3222 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3223 uint64_t value) 3224 { 3225 /* Invalidate by VA, EL2 3226 * Currently handles both VAE2 and VALE2, since we don't support 3227 * flush-last-level-only. 3228 */ 3229 ARMCPU *cpu = arm_env_get_cpu(env); 3230 CPUState *cs = CPU(cpu); 3231 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3232 3233 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2); 3234 } 3235 3236 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3237 uint64_t value) 3238 { 3239 /* Invalidate by VA, EL3 3240 * Currently handles both VAE3 and VALE3, since we don't support 3241 * flush-last-level-only. 
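     * The operand carries VA[55:12] in its low bits, so the
     * sextract64(value << 12, 0, 56) below rebuilds the page address,
     * sign-extending from bit 55; e.g. an operand with bit 43 set
     * yields a 0xff...-prefixed (kernel-half) address.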
3242 */ 3243 ARMCPU *cpu = arm_env_get_cpu(env); 3244 CPUState *cs = CPU(cpu); 3245 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3246 3247 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3); 3248 } 3249 3250 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3251 uint64_t value) 3252 { 3253 ARMCPU *cpu = arm_env_get_cpu(env); 3254 CPUState *cs = CPU(cpu); 3255 bool sec = arm_is_secure_below_el3(env); 3256 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3257 3258 if (sec) { 3259 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3260 ARMMMUIdxBit_S1SE1 | 3261 ARMMMUIdxBit_S1SE0); 3262 } else { 3263 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3264 ARMMMUIdxBit_S12NSE1 | 3265 ARMMMUIdxBit_S12NSE0); 3266 } 3267 } 3268 3269 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3270 uint64_t value) 3271 { 3272 CPUState *cs = ENV_GET_CPU(env); 3273 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3274 3275 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3276 ARMMMUIdxBit_S1E2); 3277 } 3278 3279 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3280 uint64_t value) 3281 { 3282 CPUState *cs = ENV_GET_CPU(env); 3283 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3284 3285 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3286 ARMMMUIdxBit_S1E3); 3287 } 3288 3289 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3290 uint64_t value) 3291 { 3292 /* Invalidate by IPA. This has to invalidate any structures that 3293 * contain only stage 2 translation information, but does not need 3294 * to apply to structures that contain combined stage 1 and stage 2 3295 * translation information. 3296 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 3297 */ 3298 ARMCPU *cpu = arm_env_get_cpu(env); 3299 CPUState *cs = CPU(cpu); 3300 uint64_t pageaddr; 3301 3302 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 3303 return; 3304 } 3305 3306 pageaddr = sextract64(value << 12, 0, 48); 3307 3308 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS); 3309 } 3310 3311 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3312 uint64_t value) 3313 { 3314 CPUState *cs = ENV_GET_CPU(env); 3315 uint64_t pageaddr; 3316 3317 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 3318 return; 3319 } 3320 3321 pageaddr = sextract64(value << 12, 0, 48); 3322 3323 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3324 ARMMMUIdxBit_S2NS); 3325 } 3326 3327 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 3328 bool isread) 3329 { 3330 /* We don't implement EL2, so the only control on DC ZVA is the 3331 * bit in the SCTLR which can prohibit access for EL0. 
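     * aa64_dczid_read() below reuses this check to compute DZP: if
     * the access would trap, a DCZID_EL0 read reports dcz_blocksize
     * with bit 4 (DZP) set, telling EL0 software not to use DC ZVA.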
3332 */ 3333 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 3334 return CP_ACCESS_TRAP; 3335 } 3336 return CP_ACCESS_OK; 3337 } 3338 3339 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 3340 { 3341 ARMCPU *cpu = arm_env_get_cpu(env); 3342 int dzp_bit = 1 << 4; 3343 3344 /* DZP indicates whether DC ZVA access is allowed */ 3345 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 3346 dzp_bit = 0; 3347 } 3348 return cpu->dcz_blocksize | dzp_bit; 3349 } 3350 3351 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 3352 bool isread) 3353 { 3354 if (!(env->pstate & PSTATE_SP)) { 3355 /* Access to SP_EL0 is undefined if it's being used as 3356 * the stack pointer. 3357 */ 3358 return CP_ACCESS_TRAP_UNCATEGORIZED; 3359 } 3360 return CP_ACCESS_OK; 3361 } 3362 3363 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 3364 { 3365 return env->pstate & PSTATE_SP; 3366 } 3367 3368 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 3369 { 3370 update_spsel(env, val); 3371 } 3372 3373 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3374 uint64_t value) 3375 { 3376 ARMCPU *cpu = arm_env_get_cpu(env); 3377 3378 if (raw_read(env, ri) == value) { 3379 /* Skip the TLB flush if nothing actually changed; Linux likes 3380 * to do a lot of pointless SCTLR writes. 3381 */ 3382 return; 3383 } 3384 3385 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 3386 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 3387 value &= ~SCTLR_M; 3388 } 3389 3390 raw_write(env, ri, value); 3391 /* ??? Lots of these bits are not implemented. */ 3392 /* This may enable/disable the MMU, so do a TLB flush. */ 3393 tlb_flush(CPU(cpu)); 3394 } 3395 3396 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, 3397 bool isread) 3398 { 3399 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { 3400 return CP_ACCESS_TRAP_FP_EL2; 3401 } 3402 if (env->cp15.cptr_el[3] & CPTR_TFP) { 3403 return CP_ACCESS_TRAP_FP_EL3; 3404 } 3405 return CP_ACCESS_OK; 3406 } 3407 3408 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3409 uint64_t value) 3410 { 3411 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 3412 } 3413 3414 static const ARMCPRegInfo v8_cp_reginfo[] = { 3415 /* Minimal set of EL0-visible registers. This will need to be expanded 3416 * significantly for system emulation of AArch64 CPUs. 
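 * As a reading aid (assumed mapping, not new behaviour): the five
 * fields opc0/opc1/crn/crm/opc2 below mirror the op0/op1/CRn/CRm/op2
 * fields of the A64 system-register encoding, so the NZCV entry
 * (3, 3, 4, 2, 0) is exactly what a guest
 *     mrs x0, nzcv
 * decodes to, and that is how the MRS/MSR lookup finds the regdef.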
3417 */ 3418 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 3419 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 3420 .access = PL0_RW, .type = ARM_CP_NZCV }, 3421 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 3422 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 3423 .type = ARM_CP_NO_RAW, 3424 .access = PL0_RW, .accessfn = aa64_daif_access, 3425 .fieldoffset = offsetof(CPUARMState, daif), 3426 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 3427 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 3428 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 3429 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 3430 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 3431 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 3432 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 3433 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 3434 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 3435 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 3436 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 3437 .access = PL0_R, .type = ARM_CP_NO_RAW, 3438 .readfn = aa64_dczid_read }, 3439 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 3440 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 3441 .access = PL0_W, .type = ARM_CP_DC_ZVA, 3442 #ifndef CONFIG_USER_ONLY 3443 /* Avoid overhead of an access check that always passes in user-mode */ 3444 .accessfn = aa64_zva_access, 3445 #endif 3446 }, 3447 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 3448 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 3449 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 3450 /* Cache ops: all NOPs since we don't emulate caches */ 3451 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 3452 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 3453 .access = PL1_W, .type = ARM_CP_NOP }, 3454 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 3455 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 3456 .access = PL1_W, .type = ARM_CP_NOP }, 3457 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 3458 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 3459 .access = PL0_W, .type = ARM_CP_NOP, 3460 .accessfn = aa64_cacheop_access }, 3461 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 3462 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 3463 .access = PL1_W, .type = ARM_CP_NOP }, 3464 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 3465 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 3466 .access = PL1_W, .type = ARM_CP_NOP }, 3467 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 3468 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 3469 .access = PL0_W, .type = ARM_CP_NOP, 3470 .accessfn = aa64_cacheop_access }, 3471 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 3472 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 3473 .access = PL1_W, .type = ARM_CP_NOP }, 3474 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 3475 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 3476 .access = PL0_W, .type = ARM_CP_NOP, 3477 .accessfn = aa64_cacheop_access }, 3478 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 3479 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 3480 .access = PL0_W, .type = ARM_CP_NOP, 3481 .accessfn = aa64_cacheop_access }, 3482 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 3483 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 3484 .access = PL1_W, .type = ARM_CP_NOP }, 3485 /* TLBI operations */ 3486 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 3487 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 3488 
.access = PL1_W, .type = ARM_CP_NO_RAW, 3489 .writefn = tlbi_aa64_vmalle1is_write }, 3490 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 3491 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 3492 .access = PL1_W, .type = ARM_CP_NO_RAW, 3493 .writefn = tlbi_aa64_vae1is_write }, 3494 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 3495 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 3496 .access = PL1_W, .type = ARM_CP_NO_RAW, 3497 .writefn = tlbi_aa64_vmalle1is_write }, 3498 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 3499 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 3500 .access = PL1_W, .type = ARM_CP_NO_RAW, 3501 .writefn = tlbi_aa64_vae1is_write }, 3502 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 3503 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 3504 .access = PL1_W, .type = ARM_CP_NO_RAW, 3505 .writefn = tlbi_aa64_vae1is_write }, 3506 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 3507 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 3508 .access = PL1_W, .type = ARM_CP_NO_RAW, 3509 .writefn = tlbi_aa64_vae1is_write }, 3510 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 3511 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 3512 .access = PL1_W, .type = ARM_CP_NO_RAW, 3513 .writefn = tlbi_aa64_vmalle1_write }, 3514 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 3515 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 3516 .access = PL1_W, .type = ARM_CP_NO_RAW, 3517 .writefn = tlbi_aa64_vae1_write }, 3518 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 3519 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 3520 .access = PL1_W, .type = ARM_CP_NO_RAW, 3521 .writefn = tlbi_aa64_vmalle1_write }, 3522 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 3523 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 3524 .access = PL1_W, .type = ARM_CP_NO_RAW, 3525 .writefn = tlbi_aa64_vae1_write }, 3526 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 3527 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 3528 .access = PL1_W, .type = ARM_CP_NO_RAW, 3529 .writefn = tlbi_aa64_vae1_write }, 3530 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 3531 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 3532 .access = PL1_W, .type = ARM_CP_NO_RAW, 3533 .writefn = tlbi_aa64_vae1_write }, 3534 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 3535 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 3536 .access = PL2_W, .type = ARM_CP_NO_RAW, 3537 .writefn = tlbi_aa64_ipas2e1is_write }, 3538 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 3539 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 3540 .access = PL2_W, .type = ARM_CP_NO_RAW, 3541 .writefn = tlbi_aa64_ipas2e1is_write }, 3542 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 3543 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 3544 .access = PL2_W, .type = ARM_CP_NO_RAW, 3545 .writefn = tlbi_aa64_alle1is_write }, 3546 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 3547 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, 3548 .access = PL2_W, .type = ARM_CP_NO_RAW, 3549 .writefn = tlbi_aa64_alle1is_write }, 3550 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 3551 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 3552 .access = PL2_W, .type = ARM_CP_NO_RAW, 3553 .writefn = tlbi_aa64_ipas2e1_write }, 3554 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 3555 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 3556 .access = PL2_W, .type = ARM_CP_NO_RAW, 3557 .writefn = 
tlbi_aa64_ipas2e1_write }, 3558 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 3559 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 3560 .access = PL2_W, .type = ARM_CP_NO_RAW, 3561 .writefn = tlbi_aa64_alle1_write }, 3562 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 3563 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 3564 .access = PL2_W, .type = ARM_CP_NO_RAW, 3565 .writefn = tlbi_aa64_alle1_write }, 3566 #ifndef CONFIG_USER_ONLY 3567 /* 64 bit address translation operations */ 3568 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 3569 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 3570 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3571 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 3572 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 3573 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3574 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 3575 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 3576 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3577 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 3578 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 3579 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3580 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 3581 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 3582 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3583 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 3584 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 3585 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3586 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 3587 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 3588 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3589 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, 3590 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 3591 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3592 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 3593 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 3594 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 3595 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3596 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 3597 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 3598 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3599 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 3600 .type = ARM_CP_ALIAS, 3601 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 3602 .access = PL1_RW, .resetvalue = 0, 3603 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 3604 .writefn = par_write }, 3605 #endif 3606 /* TLB invalidate last level of translation table walk */ 3607 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 3608 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 3609 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 3610 .type = ARM_CP_NO_RAW, .access = PL1_W, 3611 .writefn = tlbimvaa_is_write }, 3612 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 3613 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 3614 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 3615 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 3616 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 3617 .type = ARM_CP_NO_RAW, .access = PL2_W, 3618 .writefn =
tlbimva_hyp_write }, 3619 { .name = "TLBIMVALHIS", 3620 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 3621 .type = ARM_CP_NO_RAW, .access = PL2_W, 3622 .writefn = tlbimva_hyp_is_write }, 3623 { .name = "TLBIIPAS2", 3624 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 3625 .type = ARM_CP_NO_RAW, .access = PL2_W, 3626 .writefn = tlbiipas2_write }, 3627 { .name = "TLBIIPAS2IS", 3628 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 3629 .type = ARM_CP_NO_RAW, .access = PL2_W, 3630 .writefn = tlbiipas2_is_write }, 3631 { .name = "TLBIIPAS2L", 3632 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 3633 .type = ARM_CP_NO_RAW, .access = PL2_W, 3634 .writefn = tlbiipas2_write }, 3635 { .name = "TLBIIPAS2LIS", 3636 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 3637 .type = ARM_CP_NO_RAW, .access = PL2_W, 3638 .writefn = tlbiipas2_is_write }, 3639 /* 32 bit cache operations */ 3640 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 3641 .type = ARM_CP_NOP, .access = PL1_W }, 3642 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 3643 .type = ARM_CP_NOP, .access = PL1_W }, 3644 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 3645 .type = ARM_CP_NOP, .access = PL1_W }, 3646 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 3647 .type = ARM_CP_NOP, .access = PL1_W }, 3648 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 3649 .type = ARM_CP_NOP, .access = PL1_W }, 3650 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 3651 .type = ARM_CP_NOP, .access = PL1_W }, 3652 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 3653 .type = ARM_CP_NOP, .access = PL1_W }, 3654 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 3655 .type = ARM_CP_NOP, .access = PL1_W }, 3656 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 3657 .type = ARM_CP_NOP, .access = PL1_W }, 3658 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 3659 .type = ARM_CP_NOP, .access = PL1_W }, 3660 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 3661 .type = ARM_CP_NOP, .access = PL1_W }, 3662 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 3663 .type = ARM_CP_NOP, .access = PL1_W }, 3664 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 3665 .type = ARM_CP_NOP, .access = PL1_W }, 3666 /* MMU Domain access control / MPU write buffer control */ 3667 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 3668 .access = PL1_RW, .resetvalue = 0, 3669 .writefn = dacr_write, .raw_writefn = raw_write, 3670 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 3671 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 3672 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 3673 .type = ARM_CP_ALIAS, 3674 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 3675 .access = PL1_RW, 3676 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 3677 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 3678 .type = ARM_CP_ALIAS, 3679 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 3680 .access = PL1_RW, 3681 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 3682 /* We rely on the access checks not allowing the guest to write to the 3683 * state field when SPSel indicates that it's being used as the stack 3684 * pointer. 
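 * For example (our illustration): with SPSel == 0 the core is using
 * SP_EL0 as its current stack pointer, so sp_el0_access() above makes
 *     msr sp_el0, x0
 * UNDEF; with SPSel == 1 the same instruction simply stores to the
 * sp_el[0] field named below.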
3685 */ 3686 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 3687 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 3688 .access = PL1_RW, .accessfn = sp_el0_access, 3689 .type = ARM_CP_ALIAS, 3690 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 3691 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 3692 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 3693 .access = PL2_RW, .type = ARM_CP_ALIAS, 3694 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 3695 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 3696 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 3697 .type = ARM_CP_NO_RAW, 3698 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 3699 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 3700 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 3701 .type = ARM_CP_ALIAS, 3702 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), 3703 .access = PL2_RW, .accessfn = fpexc32_access }, 3704 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 3705 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 3706 .access = PL2_RW, .resetvalue = 0, 3707 .writefn = dacr_write, .raw_writefn = raw_write, 3708 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 3709 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 3710 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 3711 .access = PL2_RW, .resetvalue = 0, 3712 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 3713 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 3714 .type = ARM_CP_ALIAS, 3715 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 3716 .access = PL2_RW, 3717 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 3718 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 3719 .type = ARM_CP_ALIAS, 3720 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 3721 .access = PL2_RW, 3722 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 3723 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 3724 .type = ARM_CP_ALIAS, 3725 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, 3726 .access = PL2_RW, 3727 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 3728 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 3729 .type = ARM_CP_ALIAS, 3730 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 3731 .access = PL2_RW, 3732 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 3733 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 3734 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 3735 .resetvalue = 0, 3736 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 3737 { .name = "SDCR", .type = ARM_CP_ALIAS, 3738 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 3739 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 3740 .writefn = sdcr_write, 3741 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 3742 REGINFO_SENTINEL 3743 }; 3744 3745 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. 
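 * Two idioms recur below (a sketch of existing machinery, not a new
 * API): either
 *     .type = ARM_CP_CONST, .resetvalue = 0
 * or
 *     .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
 * both of which make the register RAZ/WI, so EL3 software on a CPU
 * without EL2 sees these EL2 registers as harmless zeroes.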
*/ 3746 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { 3747 { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64, 3748 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 3749 .access = PL2_RW, 3750 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 3751 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 3752 .type = ARM_CP_NO_RAW, 3753 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 3754 .access = PL2_RW, 3755 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 3756 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 3757 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 3758 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3759 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 3760 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 3761 .access = PL2_RW, .type = ARM_CP_CONST, 3762 .resetvalue = 0 }, 3763 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 3764 .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 3765 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3766 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 3767 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 3768 .access = PL2_RW, .type = ARM_CP_CONST, 3769 .resetvalue = 0 }, 3770 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 3771 .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 3772 .access = PL2_RW, .type = ARM_CP_CONST, 3773 .resetvalue = 0 }, 3774 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 3775 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 3776 .access = PL2_RW, .type = ARM_CP_CONST, 3777 .resetvalue = 0 }, 3778 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 3779 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 3780 .access = PL2_RW, .type = ARM_CP_CONST, 3781 .resetvalue = 0 }, 3782 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 3783 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 3784 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3785 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, 3786 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3787 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 3788 .type = ARM_CP_CONST, .resetvalue = 0 }, 3789 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 3790 .cp = 15, .opc1 = 6, .crm = 2, 3791 .access = PL2_RW, .accessfn = access_el3_aa32ns, 3792 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, 3793 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 3794 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 3795 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3796 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 3797 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 3798 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3799 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 3800 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 3801 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3802 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 3803 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 3804 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3805 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 3806 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3807 .resetvalue = 0 }, 3808 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 3809 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 3810 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3811 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 3812 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 3813 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3814 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 3815 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3816 .resetvalue = 0 }, 3817 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 3818 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 3819 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3820 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 3821 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3822 .resetvalue = 0 }, 3823 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 3824 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 3825 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3826 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 3827 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 3828 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3829 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 3830 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 3831 .access = PL2_RW, .accessfn = access_tda, 3832 .type = ARM_CP_CONST, .resetvalue = 0 }, 3833 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 3834 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 3835 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 3836 .type = ARM_CP_CONST, .resetvalue = 0 }, 3837 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 3838 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 3839 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3840 REGINFO_SENTINEL 3841 }; 3842 3843 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3844 { 3845 ARMCPU *cpu = arm_env_get_cpu(env); 3846 uint64_t valid_mask = HCR_MASK; 3847 3848 if (arm_feature(env, ARM_FEATURE_EL3)) { 3849 valid_mask &= ~HCR_HCD; 3850 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 3851 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 3852 * However, if we're using the SMC PSCI conduit then QEMU is 3853 * effectively acting like EL3 firmware and so the guest at 3854 * EL2 should retain the ability to prevent EL1 from being 3855 * able to make SMC calls into the ersatz firmware, so in 3856 * that case HCR.TSC should be read/write. 3857 */ 3858 valid_mask &= ~HCR_TSC; 3859 } 3860 3861 /* Clear RES0 bits. 
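 * Worked example (illustrative values): on a CPU with EL3, a guest
 *     msr hcr_el2, x0    with x0 = HCR_VM | HCR_HCD
 * keeps only HCR_VM because HCD was dropped from valid_mask above;
 * and because HCR_VM toggles stage 2 translation, a change in it
 * forces the tlb_flush() below.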
*/ 3862 value &= valid_mask; 3863 3864 /* These bits change the MMU setup: 3865 * HCR_VM enables stage 2 translation 3866 * HCR_PTW forbids certain page-table setups 3867 * HCR_DC Disables stage1 and enables stage2 translation 3868 */ 3869 if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { 3870 tlb_flush(CPU(cpu)); 3871 } 3872 raw_write(env, ri, value); 3873 } 3874 3875 static const ARMCPRegInfo el2_cp_reginfo[] = { 3876 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 3877 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 3878 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 3879 .writefn = hcr_write }, 3880 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 3881 .type = ARM_CP_ALIAS, 3882 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 3883 .access = PL2_RW, 3884 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 3885 { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64, 3886 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 3887 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 3888 { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64, 3889 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 3890 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 3891 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 3892 .type = ARM_CP_ALIAS, 3893 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 3894 .access = PL2_RW, 3895 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 3896 { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64, 3897 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 3898 .access = PL2_RW, .writefn = vbar_write, 3899 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 3900 .resetvalue = 0 }, 3901 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, 3902 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 3903 .access = PL3_RW, .type = ARM_CP_ALIAS, 3904 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 3905 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 3906 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 3907 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 3908 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) }, 3909 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 3910 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 3911 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 3912 .resetvalue = 0 }, 3913 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 3914 .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 3915 .access = PL2_RW, .type = ARM_CP_ALIAS, 3916 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 3917 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 3918 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 3919 .access = PL2_RW, .type = ARM_CP_CONST, 3920 .resetvalue = 0 }, 3921 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 3922 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 3923 .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 3924 .access = PL2_RW, .type = ARM_CP_CONST, 3925 .resetvalue = 0 }, 3926 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 3927 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 3928 .access = PL2_RW, .type = ARM_CP_CONST, 3929 .resetvalue = 0 }, 3930 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 3931 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 3932 .access = PL2_RW, .type = ARM_CP_CONST, 3933 .resetvalue = 0 }, 3934 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 3935 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 3936 .access = PL2_RW, 3937 /* no .writefn needed as this can't
cause an ASID change; 3938 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 3939 */ 3940 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 3941 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 3942 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3943 .type = ARM_CP_ALIAS, 3944 .access = PL2_RW, .accessfn = access_el3_aa32ns, 3945 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 3946 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 3947 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3948 .access = PL2_RW, 3949 /* no .writefn needed as this can't cause an ASID change; 3950 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 3951 */ 3952 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 3953 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 3954 .cp = 15, .opc1 = 6, .crm = 2, 3955 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3956 .access = PL2_RW, .accessfn = access_el3_aa32ns, 3957 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 3958 .writefn = vttbr_write }, 3959 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 3960 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 3961 .access = PL2_RW, .writefn = vttbr_write, 3962 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 3963 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 3964 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 3965 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 3966 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 3967 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 3968 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 3969 .access = PL2_RW, .resetvalue = 0, 3970 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 3971 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 3972 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 3973 .access = PL2_RW, .resetvalue = 0, 3974 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 3975 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 3976 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3977 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 3978 { .name = "TLBIALLNSNH", 3979 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 3980 .type = ARM_CP_NO_RAW, .access = PL2_W, 3981 .writefn = tlbiall_nsnh_write }, 3982 { .name = "TLBIALLNSNHIS", 3983 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 3984 .type = ARM_CP_NO_RAW, .access = PL2_W, 3985 .writefn = tlbiall_nsnh_is_write }, 3986 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 3987 .type = ARM_CP_NO_RAW, .access = PL2_W, 3988 .writefn = tlbiall_hyp_write }, 3989 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 3990 .type = ARM_CP_NO_RAW, .access = PL2_W, 3991 .writefn = tlbiall_hyp_is_write }, 3992 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 3993 .type = ARM_CP_NO_RAW, .access = PL2_W, 3994 .writefn = tlbimva_hyp_write }, 3995 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 3996 .type = ARM_CP_NO_RAW, .access = PL2_W, 3997 .writefn = tlbimva_hyp_is_write }, 3998 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 3999 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 4000 .type = ARM_CP_NO_RAW, .access = PL2_W, 4001 .writefn = tlbi_aa64_alle2_write }, 4002 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 4003 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 4004 .type = ARM_CP_NO_RAW, .access = PL2_W, 4005 .writefn = tlbi_aa64_vae2_write }, 4006 { .name = "TLBI_VALE2", .state = 
ARM_CP_STATE_AA64, 4007 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 4008 .access = PL2_W, .type = ARM_CP_NO_RAW, 4009 .writefn = tlbi_aa64_vae2_write }, 4010 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 4011 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 4012 .access = PL2_W, .type = ARM_CP_NO_RAW, 4013 .writefn = tlbi_aa64_alle2is_write }, 4014 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 4015 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 4016 .type = ARM_CP_NO_RAW, .access = PL2_W, 4017 .writefn = tlbi_aa64_vae2is_write }, 4018 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 4019 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 4020 .access = PL2_W, .type = ARM_CP_NO_RAW, 4021 .writefn = tlbi_aa64_vae2is_write }, 4022 #ifndef CONFIG_USER_ONLY 4023 /* Unlike the other EL2-related AT operations, these must 4024 * UNDEF from EL3 if EL2 is not implemented, which is why we 4025 * define them here rather than with the rest of the AT ops. 4026 */ 4027 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 4028 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 4029 .access = PL2_W, .accessfn = at_s1e2_access, 4030 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4031 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 4032 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 4033 .access = PL2_W, .accessfn = at_s1e2_access, 4034 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4035 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 4036 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 4037 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 4038 * to behave as if SCR.NS was 1. 4039 */ 4040 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 4041 .access = PL2_W, 4042 .writefn = ats1h_write, .type = ARM_CP_NO_RAW }, 4043 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 4044 .access = PL2_W, 4045 .writefn = ats1h_write, .type = ARM_CP_NO_RAW }, 4046 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 4047 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 4048 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 4049 * reset values as IMPDEF. We choose to reset to 3 to comply with 4050 * both ARMv7 and ARMv8. 
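 * (Bits [1:0] are EL1PCTEN and EL1PCEN, so a reset value of 3 means a
 * freshly booted EL1 guest may read CNTPCT and program the EL1
 * physical timer without EL2 software having to grant access first.)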
4051 */ 4052 .access = PL2_RW, .resetvalue = 3, 4053 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, 4054 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 4055 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 4056 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, 4057 .writefn = gt_cntvoff_write, 4058 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 4059 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 4060 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, 4061 .writefn = gt_cntvoff_write, 4062 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 4063 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 4064 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 4065 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 4066 .type = ARM_CP_IO, .access = PL2_RW, 4067 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 4068 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 4069 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 4070 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, 4071 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 4072 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 4073 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 4074 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 4075 .resetfn = gt_hyp_timer_reset, 4076 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, 4077 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 4078 .type = ARM_CP_IO, 4079 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 4080 .access = PL2_RW, 4081 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), 4082 .resetvalue = 0, 4083 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, 4084 #endif 4085 /* The only field of MDCR_EL2 that has a defined architectural reset value 4086 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we 4087 * don't implement any PMU event counters, so using zero as a reset 4088 * value for MDCR_EL2 is okay. 4089 */ 4090 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 4091 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 4092 .access = PL2_RW, .resetvalue = 0, 4093 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, 4094 { .name = "HPFAR", .state = ARM_CP_STATE_AA32, 4095 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 4096 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4097 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 4098 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, 4099 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 4100 .access = PL2_RW, 4101 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 4102 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 4103 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 4104 .access = PL2_RW, 4105 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, 4106 REGINFO_SENTINEL 4107 }; 4108 4109 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 4110 bool isread) 4111 { 4112 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. 4113 * At Secure EL1 it traps to EL3. 4114 */ 4115 if (arm_current_el(env) == 3) { 4116 return CP_ACCESS_OK; 4117 } 4118 if (arm_is_secure_below_el3(env)) { 4119 return CP_ACCESS_TRAP_EL3; 4120 } 4121 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads.
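 * For instance (our illustration), from Non-secure EL1 the read
 *     mrc p15, 0, r0, c1, c1, 2    @ NSACR
 * succeeds and returns the EL3-configured value, while the matching
 * mcr write falls through to CP_ACCESS_TRAP_UNCATEGORIZED below.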
*/ 4122 if (isread) { 4123 return CP_ACCESS_OK; 4124 } 4125 return CP_ACCESS_TRAP_UNCATEGORIZED; 4126 } 4127 4128 static const ARMCPRegInfo el3_cp_reginfo[] = { 4129 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 4130 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 4131 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 4132 .resetvalue = 0, .writefn = scr_write }, 4133 { .name = "SCR", .type = ARM_CP_ALIAS, 4134 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 4135 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4136 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 4137 .writefn = scr_write }, 4138 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 4139 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 4140 .access = PL3_RW, .resetvalue = 0, 4141 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 4142 { .name = "SDER", 4143 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 4144 .access = PL3_RW, .resetvalue = 0, 4145 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 4146 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 4147 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4148 .writefn = vbar_write, .resetvalue = 0, 4149 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 4150 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 4151 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 4152 .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 4153 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 4154 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 4155 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 4156 .access = PL3_RW, 4157 /* no .writefn needed as this can't cause an ASID change; 4158 * we must provide a .raw_writefn and .resetfn because we handle 4159 * reset and migration for the AArch32 TTBCR(S), which might be 4160 * using mask and base_mask. 
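 * (Those cached fields are recomputed from TTBCR.N whenever the
 * 32-bit register is written, since N determines the size of the
 * TTBR0 region and hence which address bits the table walker takes
 * from the TTBR versus the VA; a raw migration write therefore has
 * to go through the same recompute path rather than just storing the
 * value.)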
4161 */ 4162 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, 4163 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 4164 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 4165 .type = ARM_CP_ALIAS, 4166 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 4167 .access = PL3_RW, 4168 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 4169 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 4170 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 4171 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 4172 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 4173 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 4174 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 4175 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 4176 .type = ARM_CP_ALIAS, 4177 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 4178 .access = PL3_RW, 4179 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 4180 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 4181 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 4182 .access = PL3_RW, .writefn = vbar_write, 4183 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 4184 .resetvalue = 0 }, 4185 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 4186 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 4187 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 4188 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 4189 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 4190 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 4191 .access = PL3_RW, .resetvalue = 0, 4192 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 4193 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 4194 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 4195 .access = PL3_RW, .type = ARM_CP_CONST, 4196 .resetvalue = 0 }, 4197 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 4198 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 4199 .access = PL3_RW, .type = ARM_CP_CONST, 4200 .resetvalue = 0 }, 4201 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 4202 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 4203 .access = PL3_RW, .type = ARM_CP_CONST, 4204 .resetvalue = 0 }, 4205 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, 4206 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, 4207 .access = PL3_W, .type = ARM_CP_NO_RAW, 4208 .writefn = tlbi_aa64_alle3is_write }, 4209 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, 4210 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, 4211 .access = PL3_W, .type = ARM_CP_NO_RAW, 4212 .writefn = tlbi_aa64_vae3is_write }, 4213 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, 4214 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, 4215 .access = PL3_W, .type = ARM_CP_NO_RAW, 4216 .writefn = tlbi_aa64_vae3is_write }, 4217 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, 4218 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, 4219 .access = PL3_W, .type = ARM_CP_NO_RAW, 4220 .writefn = tlbi_aa64_alle3_write }, 4221 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, 4222 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, 4223 .access = PL3_W, .type = ARM_CP_NO_RAW, 4224 .writefn = tlbi_aa64_vae3_write }, 4225 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, 4226 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, 4227 .access = PL3_W, .type = ARM_CP_NO_RAW, 4228 .writefn = tlbi_aa64_vae3_write }, 4229 REGINFO_SENTINEL 4230 }; 4231 4232 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 
4233 bool isread) 4234 { 4235 /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64, 4236 * but the AArch32 CTR has its own reginfo struct) 4237 */ 4238 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) { 4239 return CP_ACCESS_TRAP; 4240 } 4241 return CP_ACCESS_OK; 4242 } 4243 4244 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 4245 uint64_t value) 4246 { 4247 /* Writes to OSLAR_EL1 may update the OS lock status, which can be 4248 * read via a bit in OSLSR_EL1. 4249 */ 4250 int oslock; 4251 4252 if (ri->state == ARM_CP_STATE_AA32) { 4253 oslock = (value == 0xC5ACCE55); 4254 } else { 4255 oslock = value & 1; 4256 } 4257 4258 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 4259 } 4260 4261 static const ARMCPRegInfo debug_cp_reginfo[] = { 4262 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 4263 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; 4264 * unlike DBGDRAR it is never accessible from EL0. 4265 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 4266 * accessor. 4267 */ 4268 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 4269 .access = PL0_R, .accessfn = access_tdra, 4270 .type = ARM_CP_CONST, .resetvalue = 0 }, 4271 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 4272 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 4273 .access = PL1_R, .accessfn = access_tdra, 4274 .type = ARM_CP_CONST, .resetvalue = 0 }, 4275 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 4276 .access = PL0_R, .accessfn = access_tdra, 4277 .type = ARM_CP_CONST, .resetvalue = 0 }, 4278 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 4279 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 4280 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 4281 .access = PL1_RW, .accessfn = access_tda, 4282 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 4283 .resetvalue = 0 }, 4284 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. 4285 * We don't implement the configurable EL0 access. 4286 */ 4287 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH, 4288 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 4289 .type = ARM_CP_ALIAS, 4290 .access = PL1_R, .accessfn = access_tda, 4291 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 4292 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 4293 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 4294 .access = PL1_W, .type = ARM_CP_NO_RAW, 4295 .accessfn = access_tdosa, 4296 .writefn = oslar_write }, 4297 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 4298 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 4299 .access = PL1_R, .resetvalue = 10, 4300 .accessfn = access_tdosa, 4301 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 4302 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 4303 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 4304 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 4305 .access = PL1_RW, .accessfn = access_tdosa, 4306 .type = ARM_CP_NOP }, 4307 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't 4308 * implement vector catch debug events yet. 
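 * (Making it ARM_CP_NOP means the guest's boot-time
 *     mcr p14, 0, r0, c0, c7, 0    @ DBGVCR
 * write is accepted and discarded rather than UNDEFing.)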
4309 */ 4310 { .name = "DBGVCR", 4311 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 4312 .access = PL1_RW, .accessfn = access_tda, 4313 .type = ARM_CP_NOP }, 4314 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 4315 * to save and restore a 32-bit guest's DBGVCR) 4316 */ 4317 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 4318 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 4319 .access = PL2_RW, .accessfn = access_tda, 4320 .type = ARM_CP_NOP }, 4321 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 4322 * Channel but Linux may try to access this register. The 32-bit 4323 * alias is DBGDCCINT. 4324 */ 4325 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 4326 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 4327 .access = PL1_RW, .accessfn = access_tda, 4328 .type = ARM_CP_NOP }, 4329 REGINFO_SENTINEL 4330 }; 4331 4332 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 4333 /* 64 bit access versions of the (dummy) debug registers */ 4334 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 4335 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 4336 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 4337 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 4338 REGINFO_SENTINEL 4339 }; 4340 4341 /* Return the exception level to which SVE-disabled exceptions should 4342 * be taken, or 0 if SVE is enabled. 4343 */ 4344 static int sve_exception_el(CPUARMState *env) 4345 { 4346 #ifndef CONFIG_USER_ONLY 4347 unsigned current_el = arm_current_el(env); 4348 4349 /* The CPACR.ZEN controls traps to EL1: 4350 * 0, 2 : trap EL0 and EL1 accesses 4351 * 1 : trap only EL0 accesses 4352 * 3 : trap no accesses 4353 */ 4354 switch (extract32(env->cp15.cpacr_el1, 16, 2)) { 4355 default: 4356 if (current_el <= 1) { 4357 /* Trap to PL1, which might be EL1 or EL3 */ 4358 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 4359 return 3; 4360 } 4361 return 1; 4362 } 4363 break; 4364 case 1: 4365 if (current_el == 0) { 4366 return 1; 4367 } 4368 break; 4369 case 3: 4370 break; 4371 } 4372 4373 /* Similarly for CPACR.FPEN, after having checked ZEN. */ 4374 switch (extract32(env->cp15.cpacr_el1, 20, 2)) { 4375 default: 4376 if (current_el <= 1) { 4377 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 4378 return 3; 4379 } 4380 return 1; 4381 } 4382 break; 4383 case 1: 4384 if (current_el == 0) { 4385 return 1; 4386 } 4387 break; 4388 case 3: 4389 break; 4390 } 4391 4392 /* CPTR_EL2. Check both TZ and TFP. */ 4393 if (current_el <= 2 4394 && (env->cp15.cptr_el[2] & (CPTR_TFP | CPTR_TZ)) 4395 && !arm_is_secure_below_el3(env)) { 4396 return 2; 4397 } 4398 4399 /* CPTR_EL3. Check both EZ and TFP. */ 4400 if (!(env->cp15.cptr_el[3] & CPTR_EZ) 4401 || (env->cp15.cptr_el[3] & CPTR_TFP)) { 4402 return 3; 4403 } 4404 #endif 4405 return 0; 4406 } 4407 4408 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4409 uint64_t value) 4410 { 4411 /* Bits other than [3:0] are RAZ/WI. 
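 * (ZCR_ELx.LEN selects the SVE vector length as (LEN + 1) * 128 bits,
 * so e.g. a write of 0x3 requests a 512-bit vector length; any higher
 * bits in the written value are simply discarded below.)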
*/ 4412 raw_write(env, ri, value & 0xf); 4413 } 4414 4415 static const ARMCPRegInfo zcr_el1_reginfo = { 4416 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 4417 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 4418 .access = PL1_RW, .type = ARM_CP_SVE, 4419 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 4420 .writefn = zcr_write, .raw_writefn = raw_write 4421 }; 4422 4423 static const ARMCPRegInfo zcr_el2_reginfo = { 4424 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 4425 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 4426 .access = PL2_RW, .type = ARM_CP_SVE, 4427 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 4428 .writefn = zcr_write, .raw_writefn = raw_write 4429 }; 4430 4431 static const ARMCPRegInfo zcr_no_el2_reginfo = { 4432 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 4433 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 4434 .access = PL2_RW, .type = ARM_CP_SVE, 4435 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore 4436 }; 4437 4438 static const ARMCPRegInfo zcr_el3_reginfo = { 4439 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 4440 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 4441 .access = PL3_RW, .type = ARM_CP_SVE, 4442 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 4443 .writefn = zcr_write, .raw_writefn = raw_write 4444 }; 4445 4446 void hw_watchpoint_update(ARMCPU *cpu, int n) 4447 { 4448 CPUARMState *env = &cpu->env; 4449 vaddr len = 0; 4450 vaddr wvr = env->cp15.dbgwvr[n]; 4451 uint64_t wcr = env->cp15.dbgwcr[n]; 4452 int mask; 4453 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 4454 4455 if (env->cpu_watchpoint[n]) { 4456 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 4457 env->cpu_watchpoint[n] = NULL; 4458 } 4459 4460 if (!extract64(wcr, 0, 1)) { 4461 /* E bit clear : watchpoint disabled */ 4462 return; 4463 } 4464 4465 switch (extract64(wcr, 3, 2)) { 4466 case 0: 4467 /* LSC 00 is reserved and must behave as if the wp is disabled */ 4468 return; 4469 case 1: 4470 flags |= BP_MEM_READ; 4471 break; 4472 case 2: 4473 flags |= BP_MEM_WRITE; 4474 break; 4475 case 3: 4476 flags |= BP_MEM_ACCESS; 4477 break; 4478 } 4479 4480 /* Attempts to use both MASK and BAS fields simultaneously are 4481 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 4482 * thus generating a watchpoint for every byte in the masked region. 4483 */ 4484 mask = extract64(wcr, 24, 4); 4485 if (mask == 1 || mask == 2) { 4486 /* Reserved values of MASK; we must act as if the mask value was 4487 * some non-reserved value, or as if the watchpoint were disabled. 4488 * We choose the latter. 4489 */ 4490 return; 4491 } else if (mask) { 4492 /* Watchpoint covers an aligned area up to 2GB in size */ 4493 len = 1ULL << mask; 4494 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 4495 * whether the watchpoint fires when the unmasked bits match; we opt 4496 * to generate the exceptions. 4497 */ 4498 wvr &= ~(len - 1); 4499 } else { 4500 /* Watchpoint covers bytes defined by the byte address select bits */ 4501 int bas = extract64(wcr, 5, 8); 4502 int basstart; 4503 4504 if (bas == 0) { 4505 /* This must act as if the watchpoint is disabled */ 4506 return; 4507 } 4508 4509 if (extract64(wvr, 2, 1)) { 4510 /* Deprecated case of an only 4-aligned address. BAS[7:4] are 4511 * ignored, and BAS[3:0] define which bytes to watch. 4512 */ 4513 bas &= 0xf; 4514 } 4515 /* The BAS bits are supposed to be programmed to indicate a contiguous 4516 * range of bytes. 
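 * (Worked example of the well-formed case, ours: bas == 0b00001100 is
 * a contiguous run, so below basstart = ctz32(bas) = 2 and
 * len = cto32(bas >> 2) = 2, watching the two bytes at wvr + 2 and
 * wvr + 3.)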
Otherwise it is CONSTRAINED UNPREDICTABLE whether 4517 * we fire for each byte in the word/doubleword addressed by the WVR. 4518 * We choose to ignore any non-zero bits after the first range of 1s. 4519 */ 4520 basstart = ctz32(bas); 4521 len = cto32(bas >> basstart); 4522 wvr += basstart; 4523 } 4524 4525 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, 4526 &env->cpu_watchpoint[n]); 4527 } 4528 4529 void hw_watchpoint_update_all(ARMCPU *cpu) 4530 { 4531 int i; 4532 CPUARMState *env = &cpu->env; 4533 4534 /* Completely clear out existing QEMU watchpoints and our array, to 4535 * avoid possible stale entries following migration load. 4536 */ 4537 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); 4538 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); 4539 4540 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { 4541 hw_watchpoint_update(cpu, i); 4542 } 4543 } 4544 4545 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4546 uint64_t value) 4547 { 4548 ARMCPU *cpu = arm_env_get_cpu(env); 4549 int i = ri->crm; 4550 4551 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the 4552 * register reads and behaves as if values written are sign extended. 4553 * Bits [1:0] are RES0. 4554 */ 4555 value = sextract64(value, 0, 49) & ~3ULL; 4556 4557 raw_write(env, ri, value); 4558 hw_watchpoint_update(cpu, i); 4559 } 4560 4561 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4562 uint64_t value) 4563 { 4564 ARMCPU *cpu = arm_env_get_cpu(env); 4565 int i = ri->crm; 4566 4567 raw_write(env, ri, value); 4568 hw_watchpoint_update(cpu, i); 4569 } 4570 4571 void hw_breakpoint_update(ARMCPU *cpu, int n) 4572 { 4573 CPUARMState *env = &cpu->env; 4574 uint64_t bvr = env->cp15.dbgbvr[n]; 4575 uint64_t bcr = env->cp15.dbgbcr[n]; 4576 vaddr addr; 4577 int bt; 4578 int flags = BP_CPU; 4579 4580 if (env->cpu_breakpoint[n]) { 4581 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); 4582 env->cpu_breakpoint[n] = NULL; 4583 } 4584 4585 if (!extract64(bcr, 0, 1)) { 4586 /* E bit clear : breakpoint disabled */ 4587 return; 4588 } 4589 4590 bt = extract64(bcr, 20, 4); 4591 4592 switch (bt) { 4593 case 4: /* unlinked address mismatch (reserved if AArch64) */ 4594 case 5: /* linked address mismatch (reserved if AArch64) */ 4595 qemu_log_mask(LOG_UNIMP, 4596 "arm: address mismatch breakpoint types not implemented\n"); 4597 return; 4598 case 0: /* unlinked address match */ 4599 case 1: /* linked address match */ 4600 { 4601 /* Bits [63:49] are hardwired to the value of bit [48]; that is, 4602 * we behave as if the register was sign extended. Bits [1:0] are 4603 * RES0. The BAS field is used to allow setting breakpoints on 16 4604 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether 4605 * a bp will fire if the addresses covered by the bp and the addresses 4606 * covered by the insn overlap but the insn doesn't start at the 4607 * start of the bp address range. We choose to require the insn and 4608 * the bp to have the same address. The constraints on writing to 4609 * BAS enforced in dbgbcr_write mean we have only four cases: 4610 * 0b0000 => no breakpoint 4611 * 0b0011 => breakpoint on addr 4612 * 0b1100 => breakpoint on addr + 2 4613 * 0b1111 => breakpoint on addr 4614 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
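 * Worked example (ours): a Thumb breakpoint on the second halfword of
 * a word is programmed with BAS == 0b1100, so for bvr == 0x1000 the
 * code below reports a breakpoint at 0x1002 only.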
4615 */ 4616 int bas = extract64(bcr, 5, 4); 4617 addr = sextract64(bvr, 0, 49) & ~3ULL; 4618 if (bas == 0) { 4619 return; 4620 } 4621 if (bas == 0xc) { 4622 addr += 2; 4623 } 4624 break; 4625 } 4626 case 2: /* unlinked context ID match */ 4627 case 8: /* unlinked VMID match (reserved if no EL2) */ 4628 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 4629 qemu_log_mask(LOG_UNIMP, 4630 "arm: unlinked context breakpoint types not implemented\n"); 4631 return; 4632 case 9: /* linked VMID match (reserved if no EL2) */ 4633 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 4634 case 3: /* linked context ID match */ 4635 default: 4636 /* We must generate no events for Linked context matches (unless 4637 * they are linked to by some other bp/wp, which is handled in 4638 * updates for the linking bp/wp). We choose to also generate no events 4639 * for reserved values. 4640 */ 4641 return; 4642 } 4643 4644 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 4645 } 4646 4647 void hw_breakpoint_update_all(ARMCPU *cpu) 4648 { 4649 int i; 4650 CPUARMState *env = &cpu->env; 4651 4652 /* Completely clear out existing QEMU breakpoints and our array, to 4653 * avoid possible stale entries following migration load. 4654 */ 4655 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 4656 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 4657 4658 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 4659 hw_breakpoint_update(cpu, i); 4660 } 4661 } 4662 4663 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4664 uint64_t value) 4665 { 4666 ARMCPU *cpu = arm_env_get_cpu(env); 4667 int i = ri->crm; 4668 4669 raw_write(env, ri, value); 4670 hw_breakpoint_update(cpu, i); 4671 } 4672 4673 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4674 uint64_t value) 4675 { 4676 ARMCPU *cpu = arm_env_get_cpu(env); 4677 int i = ri->crm; 4678 4679 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 4680 * copy of BAS[0]. 4681 */ 4682 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 4683 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 4684 4685 raw_write(env, ri, value); 4686 hw_breakpoint_update(cpu, i); 4687 } 4688 4689 static void define_debug_regs(ARMCPU *cpu) 4690 { 4691 /* Define v7 and v8 architectural debug registers. 4692 * These are just dummy implementations for now. 4693 */ 4694 int i; 4695 int wrps, brps, ctx_cmps; 4696 ARMCPRegInfo dbgdidr = { 4697 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 4698 .access = PL0_R, .accessfn = access_tda, 4699 .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr, 4700 }; 4701 4702 /* Note that all these register fields hold "number of Xs minus 1". */ 4703 brps = extract32(cpu->dbgdidr, 24, 4); 4704 wrps = extract32(cpu->dbgdidr, 28, 4); 4705 ctx_cmps = extract32(cpu->dbgdidr, 20, 4); 4706 4707 assert(ctx_cmps <= brps); 4708 4709 /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties 4710 * of the debug registers such as number of breakpoints; 4711 * check that if they both exist then they agree. 
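 * (Both fields hold "number minus one": a CPU with six breakpoints
 * reports brps == 5 in DBGDIDR[27:24] and must report the same 5 in
 * ID_AA64DFR0_EL1[15:12], which is exactly what the asserts below
 * check.)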
4712 */ 4713 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 4714 assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps); 4715 assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps); 4716 assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps); 4717 } 4718 4719 define_one_arm_cp_reg(cpu, &dbgdidr); 4720 define_arm_cp_regs(cpu, debug_cp_reginfo); 4721 4722 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 4723 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 4724 } 4725 4726 for (i = 0; i < brps + 1; i++) { 4727 ARMCPRegInfo dbgregs[] = { 4728 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 4729 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 4730 .access = PL1_RW, .accessfn = access_tda, 4731 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 4732 .writefn = dbgbvr_write, .raw_writefn = raw_write 4733 }, 4734 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 4735 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 4736 .access = PL1_RW, .accessfn = access_tda, 4737 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 4738 .writefn = dbgbcr_write, .raw_writefn = raw_write 4739 }, 4740 REGINFO_SENTINEL 4741 }; 4742 define_arm_cp_regs(cpu, dbgregs); 4743 } 4744 4745 for (i = 0; i < wrps + 1; i++) { 4746 ARMCPRegInfo dbgregs[] = { 4747 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 4748 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 4749 .access = PL1_RW, .accessfn = access_tda, 4750 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 4751 .writefn = dbgwvr_write, .raw_writefn = raw_write 4752 }, 4753 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 4754 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 4755 .access = PL1_RW, .accessfn = access_tda, 4756 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 4757 .writefn = dbgwcr_write, .raw_writefn = raw_write 4758 }, 4759 REGINFO_SENTINEL 4760 }; 4761 define_arm_cp_regs(cpu, dbgregs); 4762 } 4763 } 4764 4765 /* We don't know until after realize whether there's a GICv3 4766 * attached, and that is what registers the gicv3 sysregs. 4767 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 4768 * at runtime. 4769 */ 4770 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 4771 { 4772 ARMCPU *cpu = arm_env_get_cpu(env); 4773 uint64_t pfr1 = cpu->id_pfr1; 4774 4775 if (env->gicv3state) { 4776 pfr1 |= 1 << 28; 4777 } 4778 return pfr1; 4779 } 4780 4781 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 4782 { 4783 ARMCPU *cpu = arm_env_get_cpu(env); 4784 uint64_t pfr0 = cpu->id_aa64pfr0; 4785 4786 if (env->gicv3state) { 4787 pfr0 |= 1 << 24; 4788 } 4789 return pfr0; 4790 } 4791 4792 void register_cp_regs_for_features(ARMCPU *cpu) 4793 { 4794 /* Register all the coprocessor registers based on feature bits */ 4795 CPUARMState *env = &cpu->env; 4796 if (arm_feature(env, ARM_FEATURE_M)) { 4797 /* M profile has no coprocessor registers */ 4798 return; 4799 } 4800 4801 define_arm_cp_regs(cpu, cp_reginfo); 4802 if (!arm_feature(env, ARM_FEATURE_V8)) { 4803 /* Must go early as it is full of wildcards that may be 4804 * overridden by later definitions. 
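         * (add_cpreg_to_hashtable() only lets a later definition replace
         * an earlier one when at least one of the two is marked
         * ARM_CP_OVERRIDE; see the redefinition check there.)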
4805 */ 4806 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 4807 } 4808 4809 if (arm_feature(env, ARM_FEATURE_V6)) { 4810 /* The ID registers all have impdef reset values */ 4811 ARMCPRegInfo v6_idregs[] = { 4812 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 4813 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 4814 .access = PL1_R, .type = ARM_CP_CONST, 4815 .resetvalue = cpu->id_pfr0 }, 4816 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 4817 * the value of the GIC field until after we define these regs. 4818 */ 4819 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 4820 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 4821 .access = PL1_R, .type = ARM_CP_NO_RAW, 4822 .readfn = id_pfr1_read, 4823 .writefn = arm_cp_write_ignore }, 4824 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 4825 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 4826 .access = PL1_R, .type = ARM_CP_CONST, 4827 .resetvalue = cpu->id_dfr0 }, 4828 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 4829 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 4830 .access = PL1_R, .type = ARM_CP_CONST, 4831 .resetvalue = cpu->id_afr0 }, 4832 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 4833 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 4834 .access = PL1_R, .type = ARM_CP_CONST, 4835 .resetvalue = cpu->id_mmfr0 }, 4836 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 4837 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 4838 .access = PL1_R, .type = ARM_CP_CONST, 4839 .resetvalue = cpu->id_mmfr1 }, 4840 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 4841 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 4842 .access = PL1_R, .type = ARM_CP_CONST, 4843 .resetvalue = cpu->id_mmfr2 }, 4844 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 4845 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 4846 .access = PL1_R, .type = ARM_CP_CONST, 4847 .resetvalue = cpu->id_mmfr3 }, 4848 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 4849 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 4850 .access = PL1_R, .type = ARM_CP_CONST, 4851 .resetvalue = cpu->id_isar0 }, 4852 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 4853 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 4854 .access = PL1_R, .type = ARM_CP_CONST, 4855 .resetvalue = cpu->id_isar1 }, 4856 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 4857 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 4858 .access = PL1_R, .type = ARM_CP_CONST, 4859 .resetvalue = cpu->id_isar2 }, 4860 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 4861 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 4862 .access = PL1_R, .type = ARM_CP_CONST, 4863 .resetvalue = cpu->id_isar3 }, 4864 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 4865 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 4866 .access = PL1_R, .type = ARM_CP_CONST, 4867 .resetvalue = cpu->id_isar4 }, 4868 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 4869 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 4870 .access = PL1_R, .type = ARM_CP_CONST, 4871 .resetvalue = cpu->id_isar5 }, 4872 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 4873 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 4874 .access = PL1_R, .type = ARM_CP_CONST, 4875 .resetvalue = cpu->id_mmfr4 }, 4876 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, 4877 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 4878 .access = PL1_R, .type = ARM_CP_CONST, 4879 .resetvalue = cpu->id_isar6 }, 4880 REGINFO_SENTINEL 4881 }; 4882 define_arm_cp_regs(cpu, v6_idregs); 4883 
define_arm_cp_regs(cpu, v6_cp_reginfo); 4884 } else { 4885 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 4886 } 4887 if (arm_feature(env, ARM_FEATURE_V6K)) { 4888 define_arm_cp_regs(cpu, v6k_cp_reginfo); 4889 } 4890 if (arm_feature(env, ARM_FEATURE_V7MP) && 4891 !arm_feature(env, ARM_FEATURE_PMSA)) { 4892 define_arm_cp_regs(cpu, v7mp_cp_reginfo); 4893 } 4894 if (arm_feature(env, ARM_FEATURE_V7)) { 4895 /* v7 performance monitor control register: same implementor 4896 * field as main ID register, and we implement only the cycle 4897 * count register. 4898 */ 4899 #ifndef CONFIG_USER_ONLY 4900 ARMCPRegInfo pmcr = { 4901 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 4902 .access = PL0_RW, 4903 .type = ARM_CP_IO | ARM_CP_ALIAS, 4904 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 4905 .accessfn = pmreg_access, .writefn = pmcr_write, 4906 .raw_writefn = raw_write, 4907 }; 4908 ARMCPRegInfo pmcr64 = { 4909 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 4910 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 4911 .access = PL0_RW, .accessfn = pmreg_access, 4912 .type = ARM_CP_IO, 4913 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 4914 .resetvalue = cpu->midr & 0xff000000, 4915 .writefn = pmcr_write, .raw_writefn = raw_write, 4916 }; 4917 define_one_arm_cp_reg(cpu, &pmcr); 4918 define_one_arm_cp_reg(cpu, &pmcr64); 4919 #endif 4920 ARMCPRegInfo clidr = { 4921 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 4922 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 4923 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr 4924 }; 4925 define_one_arm_cp_reg(cpu, &clidr); 4926 define_arm_cp_regs(cpu, v7_cp_reginfo); 4927 define_debug_regs(cpu); 4928 } else { 4929 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 4930 } 4931 if (arm_feature(env, ARM_FEATURE_V8)) { 4932 /* AArch64 ID registers, which all have impdef reset values. 4933 * Note that within the ID register ranges the unused slots 4934 * must all RAZ, not UNDEF; future architecture versions may 4935 * define new registers here. 4936 */ 4937 ARMCPRegInfo v8_idregs[] = { 4938 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't 4939 * know the right value for the GIC field until after we 4940 * define these regs. 
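             * (At read time id_aa64pfr0_read() above ORs in 1 << 24, the
             * GIC system-register field, when a GICv3 is attached;
             * ARM_CP_NO_RAW keeps the synthesised value out of migration.)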
4941 */ 4942 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 4943 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 4944 .access = PL1_R, .type = ARM_CP_NO_RAW, 4945 .readfn = id_aa64pfr0_read, 4946 .writefn = arm_cp_write_ignore }, 4947 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 4948 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 4949 .access = PL1_R, .type = ARM_CP_CONST, 4950 .resetvalue = cpu->id_aa64pfr1}, 4951 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4952 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 4953 .access = PL1_R, .type = ARM_CP_CONST, 4954 .resetvalue = 0 }, 4955 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4956 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 4957 .access = PL1_R, .type = ARM_CP_CONST, 4958 .resetvalue = 0 }, 4959 { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4960 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 4961 .access = PL1_R, .type = ARM_CP_CONST, 4962 .resetvalue = 0 }, 4963 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4964 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 4965 .access = PL1_R, .type = ARM_CP_CONST, 4966 .resetvalue = 0 }, 4967 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4968 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 4969 .access = PL1_R, .type = ARM_CP_CONST, 4970 .resetvalue = 0 }, 4971 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4972 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 4973 .access = PL1_R, .type = ARM_CP_CONST, 4974 .resetvalue = 0 }, 4975 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 4976 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 4977 .access = PL1_R, .type = ARM_CP_CONST, 4978 .resetvalue = cpu->id_aa64dfr0 }, 4979 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 4980 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 4981 .access = PL1_R, .type = ARM_CP_CONST, 4982 .resetvalue = cpu->id_aa64dfr1 }, 4983 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4984 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 4985 .access = PL1_R, .type = ARM_CP_CONST, 4986 .resetvalue = 0 }, 4987 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4988 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 4989 .access = PL1_R, .type = ARM_CP_CONST, 4990 .resetvalue = 0 }, 4991 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 4992 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 4993 .access = PL1_R, .type = ARM_CP_CONST, 4994 .resetvalue = cpu->id_aa64afr0 }, 4995 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 4996 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 4997 .access = PL1_R, .type = ARM_CP_CONST, 4998 .resetvalue = cpu->id_aa64afr1 }, 4999 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5000 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 5001 .access = PL1_R, .type = ARM_CP_CONST, 5002 .resetvalue = 0 }, 5003 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5004 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 5005 .access = PL1_R, .type = ARM_CP_CONST, 5006 .resetvalue = 0 }, 5007 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 5008 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 5009 .access = PL1_R, .type = ARM_CP_CONST, 5010 .resetvalue = cpu->id_aa64isar0 }, 5011 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 5012 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 
5013 .access = PL1_R, .type = ARM_CP_CONST, 5014 .resetvalue = cpu->id_aa64isar1 }, 5015 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5016 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 5017 .access = PL1_R, .type = ARM_CP_CONST, 5018 .resetvalue = 0 }, 5019 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5020 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 5021 .access = PL1_R, .type = ARM_CP_CONST, 5022 .resetvalue = 0 }, 5023 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5024 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 5025 .access = PL1_R, .type = ARM_CP_CONST, 5026 .resetvalue = 0 }, 5027 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5028 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 5029 .access = PL1_R, .type = ARM_CP_CONST, 5030 .resetvalue = 0 }, 5031 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5032 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, 5033 .access = PL1_R, .type = ARM_CP_CONST, 5034 .resetvalue = 0 }, 5035 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5036 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 5037 .access = PL1_R, .type = ARM_CP_CONST, 5038 .resetvalue = 0 }, 5039 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 5040 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 5041 .access = PL1_R, .type = ARM_CP_CONST, 5042 .resetvalue = cpu->id_aa64mmfr0 }, 5043 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 5044 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 5045 .access = PL1_R, .type = ARM_CP_CONST, 5046 .resetvalue = cpu->id_aa64mmfr1 }, 5047 { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5048 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 5049 .access = PL1_R, .type = ARM_CP_CONST, 5050 .resetvalue = 0 }, 5051 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5052 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 5053 .access = PL1_R, .type = ARM_CP_CONST, 5054 .resetvalue = 0 }, 5055 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5056 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 5057 .access = PL1_R, .type = ARM_CP_CONST, 5058 .resetvalue = 0 }, 5059 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5060 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 5061 .access = PL1_R, .type = ARM_CP_CONST, 5062 .resetvalue = 0 }, 5063 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5064 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 5065 .access = PL1_R, .type = ARM_CP_CONST, 5066 .resetvalue = 0 }, 5067 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5068 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 5069 .access = PL1_R, .type = ARM_CP_CONST, 5070 .resetvalue = 0 }, 5071 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 5072 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 5073 .access = PL1_R, .type = ARM_CP_CONST, 5074 .resetvalue = cpu->mvfr0 }, 5075 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 5076 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 5077 .access = PL1_R, .type = ARM_CP_CONST, 5078 .resetvalue = cpu->mvfr1 }, 5079 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 5080 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 5081 .access = PL1_R, .type = ARM_CP_CONST, 5082 .resetvalue = cpu->mvfr2 }, 5083 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5084 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, 
.opc2 = 3, 5085 .access = PL1_R, .type = ARM_CP_CONST, 5086 .resetvalue = 0 }, 5087 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5088 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 5089 .access = PL1_R, .type = ARM_CP_CONST, 5090 .resetvalue = 0 }, 5091 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5092 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 5093 .access = PL1_R, .type = ARM_CP_CONST, 5094 .resetvalue = 0 }, 5095 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5096 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 5097 .access = PL1_R, .type = ARM_CP_CONST, 5098 .resetvalue = 0 }, 5099 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5100 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 5101 .access = PL1_R, .type = ARM_CP_CONST, 5102 .resetvalue = 0 }, 5103 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 5104 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 5105 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5106 .resetvalue = cpu->pmceid0 }, 5107 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 5108 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 5109 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5110 .resetvalue = cpu->pmceid0 }, 5111 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 5112 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 5113 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5114 .resetvalue = cpu->pmceid1 }, 5115 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 5116 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 5117 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5118 .resetvalue = cpu->pmceid1 }, 5119 REGINFO_SENTINEL 5120 }; 5121 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ 5122 if (!arm_feature(env, ARM_FEATURE_EL3) && 5123 !arm_feature(env, ARM_FEATURE_EL2)) { 5124 ARMCPRegInfo rvbar = { 5125 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, 5126 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 5127 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar 5128 }; 5129 define_one_arm_cp_reg(cpu, &rvbar); 5130 } 5131 define_arm_cp_regs(cpu, v8_idregs); 5132 define_arm_cp_regs(cpu, v8_cp_reginfo); 5133 } 5134 if (arm_feature(env, ARM_FEATURE_EL2)) { 5135 uint64_t vmpidr_def = mpidr_read_val(env); 5136 ARMCPRegInfo vpidr_regs[] = { 5137 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 5138 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5139 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5140 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, 5141 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, 5142 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 5143 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5144 .access = PL2_RW, .resetvalue = cpu->midr, 5145 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 5146 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 5147 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5148 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5149 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, 5150 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, 5151 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 5152 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5153 .access = PL2_RW, 5154 .resetvalue = vmpidr_def, 5155 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 5156 REGINFO_SENTINEL 5157 }; 5158 define_arm_cp_regs(cpu, vpidr_regs); 5159 define_arm_cp_regs(cpu, el2_cp_reginfo); 5160 /* RVBAR_EL2 is 
only implemented if EL2 is the highest EL */ 5161 if (!arm_feature(env, ARM_FEATURE_EL3)) { 5162 ARMCPRegInfo rvbar = { 5163 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 5164 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 5165 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar 5166 }; 5167 define_one_arm_cp_reg(cpu, &rvbar); 5168 } 5169 } else { 5170 /* If EL2 is missing but higher ELs are enabled, we need to 5171 * register the no_el2 reginfos. 5172 */ 5173 if (arm_feature(env, ARM_FEATURE_EL3)) { 5174 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 5175 * of MIDR_EL1 and MPIDR_EL1. 5176 */ 5177 ARMCPRegInfo vpidr_regs[] = { 5178 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5179 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5180 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5181 .type = ARM_CP_CONST, .resetvalue = cpu->midr, 5182 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 5183 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5184 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5185 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5186 .type = ARM_CP_NO_RAW, 5187 .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, 5188 REGINFO_SENTINEL 5189 }; 5190 define_arm_cp_regs(cpu, vpidr_regs); 5191 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); 5192 } 5193 } 5194 if (arm_feature(env, ARM_FEATURE_EL3)) { 5195 define_arm_cp_regs(cpu, el3_cp_reginfo); 5196 ARMCPRegInfo el3_regs[] = { 5197 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 5198 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 5199 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, 5200 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 5201 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 5202 .access = PL3_RW, 5203 .raw_writefn = raw_write, .writefn = sctlr_write, 5204 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 5205 .resetvalue = cpu->reset_sctlr }, 5206 REGINFO_SENTINEL 5207 }; 5208 5209 define_arm_cp_regs(cpu, el3_regs); 5210 } 5211 /* The behaviour of NSACR is sufficiently various that we don't 5212 * try to describe it in a single reginfo: 5213 * if EL3 is 64 bit, then trap to EL3 from S EL1, 5214 * reads as constant 0xc00 from NS EL1 and NS EL2 5215 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 5216 * if v7 without EL3, register doesn't exist 5217 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 5218 */ 5219 if (arm_feature(env, ARM_FEATURE_EL3)) { 5220 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5221 ARMCPRegInfo nsacr = { 5222 .name = "NSACR", .type = ARM_CP_CONST, 5223 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5224 .access = PL1_RW, .accessfn = nsacr_access, 5225 .resetvalue = 0xc00 5226 }; 5227 define_one_arm_cp_reg(cpu, &nsacr); 5228 } else { 5229 ARMCPRegInfo nsacr = { 5230 .name = "NSACR", 5231 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5232 .access = PL3_RW | PL1_R, 5233 .resetvalue = 0, 5234 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 5235 }; 5236 define_one_arm_cp_reg(cpu, &nsacr); 5237 } 5238 } else { 5239 if (arm_feature(env, ARM_FEATURE_V8)) { 5240 ARMCPRegInfo nsacr = { 5241 .name = "NSACR", .type = ARM_CP_CONST, 5242 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5243 .access = PL1_R, 5244 .resetvalue = 0xc00 5245 }; 5246 define_one_arm_cp_reg(cpu, &nsacr); 5247 } 5248 } 5249 5250 if (arm_feature(env, ARM_FEATURE_PMSA)) { 5251 if (arm_feature(env, ARM_FEATURE_V6)) { 5252 /* PMSAv6 not implemented */ 5253 
assert(arm_feature(env, ARM_FEATURE_V7)); 5254 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 5255 define_arm_cp_regs(cpu, pmsav7_cp_reginfo); 5256 } else { 5257 define_arm_cp_regs(cpu, pmsav5_cp_reginfo); 5258 } 5259 } else { 5260 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 5261 define_arm_cp_regs(cpu, vmsa_cp_reginfo); 5262 } 5263 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 5264 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 5265 } 5266 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 5267 define_arm_cp_regs(cpu, generic_timer_cp_reginfo); 5268 } 5269 if (arm_feature(env, ARM_FEATURE_VAPA)) { 5270 define_arm_cp_regs(cpu, vapa_cp_reginfo); 5271 } 5272 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 5273 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 5274 } 5275 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 5276 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 5277 } 5278 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 5279 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 5280 } 5281 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 5282 define_arm_cp_regs(cpu, omap_cp_reginfo); 5283 } 5284 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 5285 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 5286 } 5287 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 5288 define_arm_cp_regs(cpu, xscale_cp_reginfo); 5289 } 5290 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 5291 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 5292 } 5293 if (arm_feature(env, ARM_FEATURE_LPAE)) { 5294 define_arm_cp_regs(cpu, lpae_cp_reginfo); 5295 } 5296 /* Slightly awkwardly, the OMAP and StrongARM cores need all of 5297 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 5298 * be read-only (ie write causes UNDEF exception). 5299 */ 5300 { 5301 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 5302 /* Pre-v8 MIDR space. 5303 * Note that the MIDR isn't a simple constant register because 5304 * of the TI925 behaviour where writes to another register can 5305 * cause the MIDR value to change. 5306 * 5307 * Unimplemented registers in the c15 0 0 0 space default to 5308 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 5309 * and friends override accordingly. 5310 */ 5311 { .name = "MIDR", 5312 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 5313 .access = PL1_R, .resetvalue = cpu->midr, 5314 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 5315 .readfn = midr_read, 5316 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 5317 .type = ARM_CP_OVERRIDE }, 5318 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. 
*/ 5319 { .name = "DUMMY", 5320 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 5321 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5322 { .name = "DUMMY", 5323 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 5324 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5325 { .name = "DUMMY", 5326 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 5327 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5328 { .name = "DUMMY", 5329 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 5330 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5331 { .name = "DUMMY", 5332 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 5333 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5334 REGINFO_SENTINEL 5335 }; 5336 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 5337 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 5338 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 5339 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 5340 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 5341 .readfn = midr_read }, 5342 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 5343 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 5344 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 5345 .access = PL1_R, .resetvalue = cpu->midr }, 5346 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 5347 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 5348 .access = PL1_R, .resetvalue = cpu->midr }, 5349 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 5350 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 5351 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 5352 REGINFO_SENTINEL 5353 }; 5354 ARMCPRegInfo id_cp_reginfo[] = { 5355 /* These are common to v8 and pre-v8 */ 5356 { .name = "CTR", 5357 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 5358 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 5359 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 5360 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 5361 .access = PL0_R, .accessfn = ctr_el0_access, 5362 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 5363 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 5364 { .name = "TCMTR", 5365 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 5366 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5367 REGINFO_SENTINEL 5368 }; 5369 /* TLBTR is specific to VMSA */ 5370 ARMCPRegInfo id_tlbtr_reginfo = { 5371 .name = "TLBTR", 5372 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 5373 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0, 5374 }; 5375 /* MPUIR is specific to PMSA V6+ */ 5376 ARMCPRegInfo id_mpuir_reginfo = { 5377 .name = "MPUIR", 5378 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 5379 .access = PL1_R, .type = ARM_CP_CONST, 5380 .resetvalue = cpu->pmsav7_dregion << 8 5381 }; 5382 ARMCPRegInfo crn0_wi_reginfo = { 5383 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 5384 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 5385 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 5386 }; 5387 if (arm_feature(env, ARM_FEATURE_OMAPCP) || 5388 arm_feature(env, ARM_FEATURE_STRONGARM)) { 5389 ARMCPRegInfo *r; 5390 /* Register the blanket "writes ignored" value first to cover the 5391 * whole space. Then update the specific ID registers to allow write 5392 * access, so that they ignore writes rather than causing them to 5393 * UNDEF. 
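             * The net effect (illustrative): on an OMAP/StrongARM core a
             * write to MIDR hits the PL1_RW MIDR entry and is ignored via
             * arm_cp_write_ignore(), while a write to an unassigned c0
             * encoding falls through to CRN0_WI and is a NOP rather than
             * an UNDEF.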
5394 */ 5395 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 5396 for (r = id_pre_v8_midr_cp_reginfo; 5397 r->type != ARM_CP_SENTINEL; r++) { 5398 r->access = PL1_RW; 5399 } 5400 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { 5401 r->access = PL1_RW; 5402 } 5403 id_mpuir_reginfo.access = PL1_RW; 5404 id_tlbtr_reginfo.access = PL1_RW; 5405 } 5406 if (arm_feature(env, ARM_FEATURE_V8)) { 5407 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 5408 } else { 5409 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 5410 } 5411 define_arm_cp_regs(cpu, id_cp_reginfo); 5412 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 5413 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 5414 } else if (arm_feature(env, ARM_FEATURE_V7)) { 5415 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 5416 } 5417 } 5418 5419 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 5420 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 5421 } 5422 5423 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 5424 ARMCPRegInfo auxcr_reginfo[] = { 5425 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 5426 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 5427 .access = PL1_RW, .type = ARM_CP_CONST, 5428 .resetvalue = cpu->reset_auxcr }, 5429 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 5430 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 5431 .access = PL2_RW, .type = ARM_CP_CONST, 5432 .resetvalue = 0 }, 5433 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 5434 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 5435 .access = PL3_RW, .type = ARM_CP_CONST, 5436 .resetvalue = 0 }, 5437 REGINFO_SENTINEL 5438 }; 5439 define_arm_cp_regs(cpu, auxcr_reginfo); 5440 } 5441 5442 if (arm_feature(env, ARM_FEATURE_CBAR)) { 5443 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5444 /* 32 bit view is [31:18] 0...0 [43:32]. 
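             * E.g. (illustrative value) reset_cbar = 0x0000088020000000
             * yields cbar32 = 0x20000880: address bits [31:18] stay in
             * place and bits [43:32] land in the low 12 bits.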
*/ 5445 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 5446 | extract64(cpu->reset_cbar, 32, 12); 5447 ARMCPRegInfo cbar_reginfo[] = { 5448 { .name = "CBAR", 5449 .type = ARM_CP_CONST, 5450 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 5451 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 5452 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 5453 .type = ARM_CP_CONST, 5454 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 5455 .access = PL1_R, .resetvalue = cbar32 }, 5456 REGINFO_SENTINEL 5457 }; 5458 /* We don't implement a r/w 64 bit CBAR currently */ 5459 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 5460 define_arm_cp_regs(cpu, cbar_reginfo); 5461 } else { 5462 ARMCPRegInfo cbar = { 5463 .name = "CBAR", 5464 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 5465 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 5466 .fieldoffset = offsetof(CPUARMState, 5467 cp15.c15_config_base_address) 5468 }; 5469 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 5470 cbar.access = PL1_R; 5471 cbar.fieldoffset = 0; 5472 cbar.type = ARM_CP_CONST; 5473 } 5474 define_one_arm_cp_reg(cpu, &cbar); 5475 } 5476 } 5477 5478 if (arm_feature(env, ARM_FEATURE_VBAR)) { 5479 ARMCPRegInfo vbar_cp_reginfo[] = { 5480 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 5481 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 5482 .access = PL1_RW, .writefn = vbar_write, 5483 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 5484 offsetof(CPUARMState, cp15.vbar_ns) }, 5485 .resetvalue = 0 }, 5486 REGINFO_SENTINEL 5487 }; 5488 define_arm_cp_regs(cpu, vbar_cp_reginfo); 5489 } 5490 5491 /* Generic registers whose values depend on the implementation */ 5492 { 5493 ARMCPRegInfo sctlr = { 5494 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 5495 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 5496 .access = PL1_RW, 5497 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 5498 offsetof(CPUARMState, cp15.sctlr_ns) }, 5499 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 5500 .raw_writefn = raw_write, 5501 }; 5502 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 5503 /* Normally we would always end the TB on an SCTLR write, but Linux 5504 * arch/arm/mach-pxa/sleep.S expects two instructions following 5505 * an MMU enable to execute from cache. Imitate this behaviour. 
5506 */ 5507 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 5508 } 5509 define_one_arm_cp_reg(cpu, &sctlr); 5510 } 5511 5512 if (arm_feature(env, ARM_FEATURE_SVE)) { 5513 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 5514 if (arm_feature(env, ARM_FEATURE_EL2)) { 5515 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 5516 } else { 5517 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 5518 } 5519 if (arm_feature(env, ARM_FEATURE_EL3)) { 5520 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 5521 } 5522 } 5523 } 5524 5525 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 5526 { 5527 CPUState *cs = CPU(cpu); 5528 CPUARMState *env = &cpu->env; 5529 5530 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5531 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, 5532 aarch64_fpu_gdb_set_reg, 5533 34, "aarch64-fpu.xml", 0); 5534 } else if (arm_feature(env, ARM_FEATURE_NEON)) { 5535 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5536 51, "arm-neon.xml", 0); 5537 } else if (arm_feature(env, ARM_FEATURE_VFP3)) { 5538 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5539 35, "arm-vfp3.xml", 0); 5540 } else if (arm_feature(env, ARM_FEATURE_VFP)) { 5541 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5542 19, "arm-vfp.xml", 0); 5543 } 5544 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, 5545 arm_gen_dynamic_xml(cs), 5546 "system-registers.xml", 0); 5547 } 5548 5549 /* Sort alphabetically by type name, except for "any". */ 5550 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 5551 { 5552 ObjectClass *class_a = (ObjectClass *)a; 5553 ObjectClass *class_b = (ObjectClass *)b; 5554 const char *name_a, *name_b; 5555 5556 name_a = object_class_get_name(class_a); 5557 name_b = object_class_get_name(class_b); 5558 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 5559 return 1; 5560 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 5561 return -1; 5562 } else { 5563 return strcmp(name_a, name_b); 5564 } 5565 } 5566 5567 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 5568 { 5569 ObjectClass *oc = data; 5570 CPUListState *s = user_data; 5571 const char *typename; 5572 char *name; 5573 5574 typename = object_class_get_name(oc); 5575 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 5576 (*s->cpu_fprintf)(s->file, " %s\n", 5577 name); 5578 g_free(name); 5579 } 5580 5581 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf) 5582 { 5583 CPUListState s = { 5584 .file = f, 5585 .cpu_fprintf = cpu_fprintf, 5586 }; 5587 GSList *list; 5588 5589 list = object_class_get_list(TYPE_ARM_CPU, false); 5590 list = g_slist_sort(list, arm_cpu_list_compare); 5591 (*cpu_fprintf)(f, "Available CPUs:\n"); 5592 g_slist_foreach(list, arm_cpu_list_entry, &s); 5593 g_slist_free(list); 5594 #ifdef CONFIG_KVM 5595 /* The 'host' CPU type is dynamically registered only if KVM is 5596 * enabled, so we have to special-case it here: 5597 */ 5598 (*cpu_fprintf)(f, " host (only available in KVM mode)\n"); 5599 #endif 5600 } 5601 5602 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 5603 { 5604 ObjectClass *oc = data; 5605 CpuDefinitionInfoList **cpu_list = user_data; 5606 CpuDefinitionInfoList *entry; 5607 CpuDefinitionInfo *info; 5608 const char *typename; 5609 5610 typename = object_class_get_name(oc); 5611 info = g_malloc0(sizeof(*info)); 5612 info->name = g_strndup(typename, 5613 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 5614 info->q_typename = g_strdup(typename); 5615 5616 entry = 
g_malloc0(sizeof(*entry)); 5617 entry->value = info; 5618 entry->next = *cpu_list; 5619 *cpu_list = entry; 5620 } 5621 5622 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp) 5623 { 5624 CpuDefinitionInfoList *cpu_list = NULL; 5625 GSList *list; 5626 5627 list = object_class_get_list(TYPE_ARM_CPU, false); 5628 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 5629 g_slist_free(list); 5630 5631 return cpu_list; 5632 } 5633 5634 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 5635 void *opaque, int state, int secstate, 5636 int crm, int opc1, int opc2, 5637 const char *name) 5638 { 5639 /* Private utility function for define_one_arm_cp_reg_with_opaque(): 5640 * add a single reginfo struct to the hash table. 5641 */ 5642 uint32_t *key = g_new(uint32_t, 1); 5643 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); 5644 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; 5645 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; 5646 5647 r2->name = g_strdup(name); 5648 /* Reset the secure state to the specific incoming state. This is 5649 * necessary as the register may have been defined with both states. 5650 */ 5651 r2->secure = secstate; 5652 5653 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 5654 /* Register is banked (using both entries in array). 5655 * Overwriting fieldoffset as the array is only used to define 5656 * banked registers but later only fieldoffset is used. 5657 */ 5658 r2->fieldoffset = r->bank_fieldoffsets[ns]; 5659 } 5660 5661 if (state == ARM_CP_STATE_AA32) { 5662 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 5663 /* If the register is banked then we don't need to migrate or 5664 * reset the 32-bit instance in certain cases: 5665 * 5666 * 1) If the register has both 32-bit and 64-bit instances then we 5667 * can count on the 64-bit instance taking care of the 5668 * non-secure bank. 5669 * 2) If ARMv8 is enabled then we can count on a 64-bit version 5670 * taking care of the secure bank. This requires that separate 5671 * 32 and 64-bit definitions are provided. 5672 */ 5673 if ((r->state == ARM_CP_STATE_BOTH && ns) || 5674 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 5675 r2->type |= ARM_CP_ALIAS; 5676 } 5677 } else if ((secstate != r->secure) && !ns) { 5678 /* The register is not banked so we only want to allow migration of 5679 * the non-secure instance. 5680 */ 5681 r2->type |= ARM_CP_ALIAS; 5682 } 5683 5684 if (r->state == ARM_CP_STATE_BOTH) { 5685 /* We assume it is a cp15 register if the .cp field is left unset. 5686 */ 5687 if (r2->cp == 0) { 5688 r2->cp = 15; 5689 } 5690 5691 #ifdef HOST_WORDS_BIGENDIAN 5692 if (r2->fieldoffset) { 5693 r2->fieldoffset += sizeof(uint32_t); 5694 } 5695 #endif 5696 } 5697 } 5698 if (state == ARM_CP_STATE_AA64) { 5699 /* To allow abbreviation of ARMCPRegInfo 5700 * definitions, we treat cp == 0 as equivalent to 5701 * the value for "standard guest-visible sysreg". 5702 * STATE_BOTH definitions are also always "standard 5703 * sysreg" in their AArch64 view (the .cp value may 5704 * be non-zero for the benefit of the AArch32 view). 
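         * For example, a STATE_BOTH reginfo with .cp = 15 is entered
         * twice: keyed with cp = 15 via ENCODE_CP_REG() for the AArch32
         * view, and with cp = CP_REG_ARM64_SYSREG_CP via
         * ENCODE_AA64_CP_REG() for the AArch64 view.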
5705 */ 5706 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { 5707 r2->cp = CP_REG_ARM64_SYSREG_CP; 5708 } 5709 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, 5710 r2->opc0, opc1, opc2); 5711 } else { 5712 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); 5713 } 5714 if (opaque) { 5715 r2->opaque = opaque; 5716 } 5717 /* reginfo passed to helpers is correct for the actual access, 5718 * and is never ARM_CP_STATE_BOTH: 5719 */ 5720 r2->state = state; 5721 /* Make sure reginfo passed to helpers for wildcarded regs 5722 * has the correct crm/opc1/opc2 for this reg, not CP_ANY: 5723 */ 5724 r2->crm = crm; 5725 r2->opc1 = opc1; 5726 r2->opc2 = opc2; 5727 /* By convention, for wildcarded registers only the first 5728 * entry is used for migration; the others are marked as 5729 * ALIAS so we don't try to transfer the register 5730 * multiple times. Special registers (ie NOP/WFI) are 5731 * never migratable and not even raw-accessible. 5732 */ 5733 if ((r->type & ARM_CP_SPECIAL)) { 5734 r2->type |= ARM_CP_NO_RAW; 5735 } 5736 if (((r->crm == CP_ANY) && crm != 0) || 5737 ((r->opc1 == CP_ANY) && opc1 != 0) || 5738 ((r->opc2 == CP_ANY) && opc2 != 0)) { 5739 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 5740 } 5741 5742 /* Check that raw accesses are either forbidden or handled. Note that 5743 * we can't assert this earlier because the setup of fieldoffset for 5744 * banked registers has to be done first. 5745 */ 5746 if (!(r2->type & ARM_CP_NO_RAW)) { 5747 assert(!raw_accessors_invalid(r2)); 5748 } 5749 5750 /* Overriding of an existing definition must be explicitly 5751 * requested. 5752 */ 5753 if (!(r->type & ARM_CP_OVERRIDE)) { 5754 ARMCPRegInfo *oldreg; 5755 oldreg = g_hash_table_lookup(cpu->cp_regs, key); 5756 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { 5757 fprintf(stderr, "Register redefined: cp=%d %d bit " 5758 "crn=%d crm=%d opc1=%d opc2=%d, " 5759 "was %s, now %s\n", r2->cp, 32 + 32 * is64, 5760 r2->crn, r2->crm, r2->opc1, r2->opc2, 5761 oldreg->name, r2->name); 5762 g_assert_not_reached(); 5763 } 5764 } 5765 g_hash_table_insert(cpu->cp_regs, key, r2); 5766 } 5767 5768 5769 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 5770 const ARMCPRegInfo *r, void *opaque) 5771 { 5772 /* Define implementations of coprocessor registers. 5773 * We store these in a hashtable because typically 5774 * there are less than 150 registers in a space which 5775 * is 16*16*16*8*8 = 262144 in size. 5776 * Wildcarding is supported for the crm, opc1 and opc2 fields. 5777 * If a register is defined twice then the second definition is 5778 * used, so this can be used to define some generic registers and 5779 * then override them with implementation specific variations. 5780 * At least one of the original and the second definition should 5781 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 5782 * against accidental use. 5783 * 5784 * The state field defines whether the register is to be 5785 * visible in the AArch32 or AArch64 execution state. If the 5786 * state is set to ARM_CP_STATE_BOTH then we synthesise a 5787 * reginfo structure for the AArch32 view, which sees the lower 5788 * 32 bits of the 64 bit register. 5789 * 5790 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 5791 * be wildcarded. AArch64 registers are always considered to be 64 5792 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 5793 * the register, if any. 5794 */ 5795 int crm, opc1, opc2, state; 5796 int crmmin = (r->crm == CP_ANY) ? 
0 : r->crm; 5797 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 5798 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 5799 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 5800 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 5801 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 5802 /* 64 bit registers have only CRm and Opc1 fields */ 5803 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 5804 /* op0 only exists in the AArch64 encodings */ 5805 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 5806 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 5807 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 5808 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 5809 * encodes a minimum access level for the register. We roll this 5810 * runtime check into our general permission check code, so check 5811 * here that the reginfo's specified permissions are strict enough 5812 * to encompass the generic architectural permission check. 5813 */ 5814 if (r->state != ARM_CP_STATE_AA32) { 5815 int mask = 0; 5816 switch (r->opc1) { 5817 case 0: case 1: case 2: 5818 /* min_EL EL1 */ 5819 mask = PL1_RW; 5820 break; 5821 case 3: 5822 /* min_EL EL0 */ 5823 mask = PL0_RW; 5824 break; 5825 case 4: 5826 /* min_EL EL2 */ 5827 mask = PL2_RW; 5828 break; 5829 case 5: 5830 /* unallocated encoding, so not possible */ 5831 assert(false); 5832 break; 5833 case 6: 5834 /* min_EL EL3 */ 5835 mask = PL3_RW; 5836 break; 5837 case 7: 5838 /* min_EL EL1, secure mode only (we don't check the latter) */ 5839 mask = PL1_RW; 5840 break; 5841 default: 5842 /* broken reginfo with out-of-range opc1 */ 5843 assert(false); 5844 break; 5845 } 5846 /* assert our permissions are not too lax (stricter is fine) */ 5847 assert((r->access & ~mask) == 0); 5848 } 5849 5850 /* Check that the register definition has enough info to handle 5851 * reads and writes if they are permitted. 5852 */ 5853 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { 5854 if (r->access & PL3_R) { 5855 assert((r->fieldoffset || 5856 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 5857 r->readfn); 5858 } 5859 if (r->access & PL3_W) { 5860 assert((r->fieldoffset || 5861 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 5862 r->writefn); 5863 } 5864 } 5865 /* Bad type field probably means missing sentinel at end of reg list */ 5866 assert(cptype_valid(r->type)); 5867 for (crm = crmmin; crm <= crmmax; crm++) { 5868 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 5869 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 5870 for (state = ARM_CP_STATE_AA32; 5871 state <= ARM_CP_STATE_AA64; state++) { 5872 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 5873 continue; 5874 } 5875 if (state == ARM_CP_STATE_AA32) { 5876 /* Under AArch32 CP registers can be common 5877 * (same for secure and non-secure world) or banked. 
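                         * E.g. (illustrative name) a definition of "FOO"
                         * left with the default secure state is entered
                         * twice below: once as "FOO_S" with
                         * ARM_CP_SECSTATE_S and once as "FOO" with
                         * ARM_CP_SECSTATE_NS.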
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of AArch32 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
5972 */ 5973 if (write_type == CPSRWriteByInstr && 5974 (env->cp15.hcr_el2 & HCR_TGE) && 5975 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 5976 !arm_is_secure_below_el3(env)) { 5977 return 1; 5978 } 5979 return 0; 5980 case ARM_CPU_MODE_HYP: 5981 return !arm_feature(env, ARM_FEATURE_EL2) 5982 || arm_current_el(env) < 2 || arm_is_secure(env); 5983 case ARM_CPU_MODE_MON: 5984 return arm_current_el(env) < 3; 5985 default: 5986 return 1; 5987 } 5988 } 5989 5990 uint32_t cpsr_read(CPUARMState *env) 5991 { 5992 int ZF; 5993 ZF = (env->ZF == 0); 5994 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 5995 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 5996 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 5997 | ((env->condexec_bits & 0xfc) << 8) 5998 | (env->GE << 16) | (env->daif & CPSR_AIF); 5999 } 6000 6001 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 6002 CPSRWriteType write_type) 6003 { 6004 uint32_t changed_daif; 6005 6006 if (mask & CPSR_NZCV) { 6007 env->ZF = (~val) & CPSR_Z; 6008 env->NF = val; 6009 env->CF = (val >> 29) & 1; 6010 env->VF = (val << 3) & 0x80000000; 6011 } 6012 if (mask & CPSR_Q) 6013 env->QF = ((val & CPSR_Q) != 0); 6014 if (mask & CPSR_T) 6015 env->thumb = ((val & CPSR_T) != 0); 6016 if (mask & CPSR_IT_0_1) { 6017 env->condexec_bits &= ~3; 6018 env->condexec_bits |= (val >> 25) & 3; 6019 } 6020 if (mask & CPSR_IT_2_7) { 6021 env->condexec_bits &= 3; 6022 env->condexec_bits |= (val >> 8) & 0xfc; 6023 } 6024 if (mask & CPSR_GE) { 6025 env->GE = (val >> 16) & 0xf; 6026 } 6027 6028 /* In a V7 implementation that includes the security extensions but does 6029 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 6030 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 6031 * bits respectively. 6032 * 6033 * In a V8 implementation, it is permitted for privileged software to 6034 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 6035 */ 6036 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 6037 arm_feature(env, ARM_FEATURE_EL3) && 6038 !arm_feature(env, ARM_FEATURE_EL2) && 6039 !arm_is_secure(env)) { 6040 6041 changed_daif = (env->daif ^ val) & mask; 6042 6043 if (changed_daif & CPSR_A) { 6044 /* Check to see if we are allowed to change the masking of async 6045 * abort exceptions from a non-secure state. 6046 */ 6047 if (!(env->cp15.scr_el3 & SCR_AW)) { 6048 qemu_log_mask(LOG_GUEST_ERROR, 6049 "Ignoring attempt to switch CPSR_A flag from " 6050 "non-secure world with SCR.AW bit clear\n"); 6051 mask &= ~CPSR_A; 6052 } 6053 } 6054 6055 if (changed_daif & CPSR_F) { 6056 /* Check to see if we are allowed to change the masking of FIQ 6057 * exceptions from a non-secure state. 6058 */ 6059 if (!(env->cp15.scr_el3 & SCR_FW)) { 6060 qemu_log_mask(LOG_GUEST_ERROR, 6061 "Ignoring attempt to switch CPSR_F flag from " 6062 "non-secure world with SCR.FW bit clear\n"); 6063 mask &= ~CPSR_F; 6064 } 6065 6066 /* Check whether non-maskable FIQ (NMFI) support is enabled. 6067 * If this bit is set software is not allowed to mask 6068 * FIQs, but is allowed to set CPSR_F to 0. 
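             * Concretely: with SCTLR.NMFI set, a write with val & CPSR_F
             * set has CPSR_F stripped from the mask below, so F stays 0,
             * while a write that clears F is still honoured.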
6069 */ 6070 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 6071 (val & CPSR_F)) { 6072 qemu_log_mask(LOG_GUEST_ERROR, 6073 "Ignoring attempt to enable CPSR_F flag " 6074 "(non-maskable FIQ [NMFI] support enabled)\n"); 6075 mask &= ~CPSR_F; 6076 } 6077 } 6078 } 6079 6080 env->daif &= ~(CPSR_AIF & mask); 6081 env->daif |= val & CPSR_AIF & mask; 6082 6083 if (write_type != CPSRWriteRaw && 6084 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 6085 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 6086 /* Note that we can only get here in USR mode if this is a 6087 * gdb stub write; for this case we follow the architectural 6088 * behaviour for guest writes in USR mode of ignoring an attempt 6089 * to switch mode. (Those are caught by translate.c for writes 6090 * triggered by guest instructions.) 6091 */ 6092 mask &= ~CPSR_M; 6093 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 6094 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 6095 * v7, and has defined behaviour in v8: 6096 * + leave CPSR.M untouched 6097 * + allow changes to the other CPSR fields 6098 * + set PSTATE.IL 6099 * For user changes via the GDB stub, we don't set PSTATE.IL, 6100 * as this would be unnecessarily harsh for a user error. 6101 */ 6102 mask &= ~CPSR_M; 6103 if (write_type != CPSRWriteByGDBStub && 6104 arm_feature(env, ARM_FEATURE_V8)) { 6105 mask |= CPSR_IL; 6106 val |= CPSR_IL; 6107 } 6108 } else { 6109 switch_mode(env, val & CPSR_M); 6110 } 6111 } 6112 mask &= ~CACHED_CPSR_BITS; 6113 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 6114 } 6115 6116 /* Sign/zero extend */ 6117 uint32_t HELPER(sxtb16)(uint32_t x) 6118 { 6119 uint32_t res; 6120 res = (uint16_t)(int8_t)x; 6121 res |= (uint32_t)(int8_t)(x >> 16) << 16; 6122 return res; 6123 } 6124 6125 uint32_t HELPER(uxtb16)(uint32_t x) 6126 { 6127 uint32_t res; 6128 res = (uint16_t)(uint8_t)x; 6129 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 6130 return res; 6131 } 6132 6133 int32_t HELPER(sdiv)(int32_t num, int32_t den) 6134 { 6135 if (den == 0) 6136 return 0; 6137 if (num == INT_MIN && den == -1) 6138 return INT_MIN; 6139 return num / den; 6140 } 6141 6142 uint32_t HELPER(udiv)(uint32_t num, uint32_t den) 6143 { 6144 if (den == 0) 6145 return 0; 6146 return num / den; 6147 } 6148 6149 uint32_t HELPER(rbit)(uint32_t x) 6150 { 6151 return revbit32(x); 6152 } 6153 6154 #if defined(CONFIG_USER_ONLY) 6155 6156 /* These should probably raise undefined insn exceptions. */ 6157 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) 6158 { 6159 ARMCPU *cpu = arm_env_get_cpu(env); 6160 6161 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg); 6162 } 6163 6164 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 6165 { 6166 ARMCPU *cpu = arm_env_get_cpu(env); 6167 6168 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg); 6169 return 0; 6170 } 6171 6172 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) 6173 { 6174 /* translate.c should never generate calls here in user-only mode */ 6175 g_assert_not_reached(); 6176 } 6177 6178 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 6179 { 6180 /* translate.c should never generate calls here in user-only mode */ 6181 g_assert_not_reached(); 6182 } 6183 6184 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) 6185 { 6186 /* The TT instructions can be used by unprivileged code, but in 6187 * user-only emulation we don't have the MPU. 
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else

void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode) {
        return;
    }

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria. Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 * Dimensions:
 * target_el_table[2][2][2][2][2][4]
 *                 |  |  |  |  |  +--- Current EL
 *                 |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                 |  |  |  +--------- HCR mask override
 *                 |  |  +------------ SCR exec state control
 *                 |  +--------------- SCR mask override
 *                 +------------------ 32-bit(0)/64-bit(1) EL3
 *
 * The table values are as such:
 * 0-3 = EL0-EL3
 * -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken". The two cases where this is applicable are:
 * 1) An exception is taken from EL3 but the SCR does not have the exception
 * routed to EL3.
 * 2) An exception is taken from EL2 but the HCR does not have the exception
 * routed to EL2.
 * In these two cases, the below table contains a target of EL1. This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
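 * A worked lookup (illustrative): a physical IRQ from NS EL0 on a CPU
 * with 64-bit EL3, SCR.IRQ = 0 and HCR.IMO = 1 indexes
 * target_el_table[1][0][rw][1][0][0], which is 2 for either rw value,
 * so the IRQ is routed to EL2.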
 *
 *      SCR     HCR
 *   64  EA     AMO  From
 *  BIT IRQ     IMO  Non-secure         Secure
 *  EL3 FIQ  RW FMO  EL0 EL1 EL2 EL3    EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};

/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    int rw;
    int scr;
    int hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    int is64 = arm_feature(env, ARM_FEATURE_AARCH64);

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
        break;
    }

    /* If HCR.TGE is set then HCR is treated as being 1 */
    hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, bool ignfault)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi;
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during stacking\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception. Even in this
     * case we will still update the fault status registers.
6417 */ 6418 if (!ignfault) { 6419 armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure); 6420 } 6421 return false; 6422 } 6423 6424 static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr, 6425 ARMMMUIdx mmu_idx) 6426 { 6427 CPUState *cs = CPU(cpu); 6428 CPUARMState *env = &cpu->env; 6429 MemTxAttrs attrs = {}; 6430 MemTxResult txres; 6431 target_ulong page_size; 6432 hwaddr physaddr; 6433 int prot; 6434 ARMMMUFaultInfo fi; 6435 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 6436 int exc; 6437 bool exc_secure; 6438 uint32_t value; 6439 6440 if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr, 6441 &attrs, &prot, &page_size, &fi, NULL)) { 6442 /* MPU/SAU lookup failed */ 6443 if (fi.type == ARMFault_QEMU_SFault) { 6444 qemu_log_mask(CPU_LOG_INT, 6445 "...SecureFault with SFSR.AUVIOL during unstack\n"); 6446 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; 6447 env->v7m.sfar = addr; 6448 exc = ARMV7M_EXCP_SECURE; 6449 exc_secure = false; 6450 } else { 6451 qemu_log_mask(CPU_LOG_INT, 6452 "...MemManageFault with CFSR.MUNSTKERR\n"); 6453 env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK; 6454 exc = ARMV7M_EXCP_MEM; 6455 exc_secure = secure; 6456 } 6457 goto pend_fault; 6458 } 6459 6460 value = address_space_ldl(arm_addressspace(cs, attrs), physaddr, 6461 attrs, &txres); 6462 if (txres != MEMTX_OK) { 6463 /* BusFault trying to read the data */ 6464 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n"); 6465 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK; 6466 exc = ARMV7M_EXCP_BUS; 6467 exc_secure = false; 6468 goto pend_fault; 6469 } 6470 6471 *dest = value; 6472 return true; 6473 6474 pend_fault: 6475 /* By pending the exception at this point we are making 6476 * the IMPDEF choice "overridden exceptions pended" (see the 6477 * MergeExcInfo() pseudocode). The other choice would be to not 6478 * pend them now and then make a choice about which to throw away 6479 * later if we have two derived exceptions. 6480 */ 6481 armv7m_nvic_set_pending(env->nvic, exc, exc_secure); 6482 return false; 6483 } 6484 6485 /* Return true if we're using the process stack pointer (not the MSP) */ 6486 static bool v7m_using_psp(CPUARMState *env) 6487 { 6488 /* Handler mode always uses the main stack; for thread mode 6489 * the CONTROL.SPSEL bit determines the answer. 6490 * Note that in v7M it is not possible to be in Handler mode with 6491 * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both. 6492 */ 6493 return !arm_v7m_is_handler_mode(env) && 6494 env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK; 6495 } 6496 6497 /* Write to v7M CONTROL.SPSEL bit for the specified security bank. 6498 * This may change the current stack pointer between Main and Process 6499 * stack pointers if it is done for the CONTROL register for the current 6500 * security state. 6501 */ 6502 static void write_v7m_control_spsel_for_secstate(CPUARMState *env, 6503 bool new_spsel, 6504 bool secstate) 6505 { 6506 bool old_is_psp = v7m_using_psp(env); 6507 6508 env->v7m.control[secstate] = 6509 deposit32(env->v7m.control[secstate], 6510 R_V7M_CONTROL_SPSEL_SHIFT, 6511 R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); 6512 6513 if (secstate == env->v7m.secure) { 6514 bool new_is_psp = v7m_using_psp(env); 6515 uint32_t tmp; 6516 6517 if (old_is_psp != new_is_psp) { 6518 tmp = env->v7m.other_sp; 6519 env->v7m.other_sp = env->regs[13]; 6520 env->regs[13] = tmp; 6521 } 6522 } 6523 } 6524 6525 /* Write to v7M CONTROL.SPSEL bit. 
This may change the current 6526 * stack pointer between Main and Process stack pointers. 6527 */ 6528 static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) 6529 { 6530 write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure); 6531 } 6532 6533 void write_v7m_exception(CPUARMState *env, uint32_t new_exc) 6534 { 6535 /* Write a new value to v7m.exception, thus transitioning into or out 6536 * of Handler mode; this may result in a change of active stack pointer. 6537 */ 6538 bool new_is_psp, old_is_psp = v7m_using_psp(env); 6539 uint32_t tmp; 6540 6541 env->v7m.exception = new_exc; 6542 6543 new_is_psp = v7m_using_psp(env); 6544 6545 if (old_is_psp != new_is_psp) { 6546 tmp = env->v7m.other_sp; 6547 env->v7m.other_sp = env->regs[13]; 6548 env->regs[13] = tmp; 6549 } 6550 } 6551 6552 /* Switch M profile security state between NS and S */ 6553 static void switch_v7m_security_state(CPUARMState *env, bool new_secstate) 6554 { 6555 uint32_t new_ss_msp, new_ss_psp; 6556 6557 if (env->v7m.secure == new_secstate) { 6558 return; 6559 } 6560 6561 /* All the banked state is accessed by looking at env->v7m.secure 6562 * except for the stack pointer; rearrange the SP appropriately. 6563 */ 6564 new_ss_msp = env->v7m.other_ss_msp; 6565 new_ss_psp = env->v7m.other_ss_psp; 6566 6567 if (v7m_using_psp(env)) { 6568 env->v7m.other_ss_psp = env->regs[13]; 6569 env->v7m.other_ss_msp = env->v7m.other_sp; 6570 } else { 6571 env->v7m.other_ss_msp = env->regs[13]; 6572 env->v7m.other_ss_psp = env->v7m.other_sp; 6573 } 6574 6575 env->v7m.secure = new_secstate; 6576 6577 if (v7m_using_psp(env)) { 6578 env->regs[13] = new_ss_psp; 6579 env->v7m.other_sp = new_ss_msp; 6580 } else { 6581 env->regs[13] = new_ss_msp; 6582 env->v7m.other_sp = new_ss_psp; 6583 } 6584 } 6585 6586 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) 6587 { 6588 /* Handle v7M BXNS: 6589 * - if the return value is a magic value, do exception return (like BX) 6590 * - otherwise bit 0 of the return value is the target security state 6591 */ 6592 uint32_t min_magic; 6593 6594 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6595 /* Covers FNC_RETURN and EXC_RETURN magic */ 6596 min_magic = FNC_RETURN_MIN_MAGIC; 6597 } else { 6598 /* EXC_RETURN magic only */ 6599 min_magic = EXC_RETURN_MIN_MAGIC; 6600 } 6601 6602 if (dest >= min_magic) { 6603 /* This is an exception return magic value; put it where 6604 * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT. 6605 * Note that if we ever add gen_ss_advance() singlestep support to 6606 * M profile this should count as an "instruction execution complete" 6607 * event (compare gen_bx_excret_final_code()). 
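         * (FNC_RETURN_MIN_MAGIC is 0xfefffffe and EXC_RETURN_MIN_MAGIC is
         * 0xff000000, so the single lower-bound comparison above covers
         * both kinds of magic value.)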
6608 */ 6609 env->regs[15] = dest & ~1; 6610 env->thumb = dest & 1; 6611 HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT); 6612 /* notreached */ 6613 } 6614 6615 /* translate.c should have made BXNS UNDEF unless we're secure */ 6616 assert(env->v7m.secure); 6617 6618 switch_v7m_security_state(env, dest & 1); 6619 env->thumb = 1; 6620 env->regs[15] = dest & ~1; 6621 } 6622 6623 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 6624 { 6625 /* Handle v7M BLXNS: 6626 * - bit 0 of the destination address is the target security state 6627 */ 6628 6629 /* At this point regs[15] is the address just after the BLXNS */ 6630 uint32_t nextinst = env->regs[15] | 1; 6631 uint32_t sp = env->regs[13] - 8; 6632 uint32_t saved_psr; 6633 6634 /* translate.c will have made BLXNS UNDEF unless we're secure */ 6635 assert(env->v7m.secure); 6636 6637 if (dest & 1) { 6638 /* target is Secure, so this is just a normal BLX, 6639 * except that the low bit doesn't indicate Thumb/not. 6640 */ 6641 env->regs[14] = nextinst; 6642 env->thumb = 1; 6643 env->regs[15] = dest & ~1; 6644 return; 6645 } 6646 6647 /* Target is non-secure: first push a stack frame */ 6648 if (!QEMU_IS_ALIGNED(sp, 8)) { 6649 qemu_log_mask(LOG_GUEST_ERROR, 6650 "BLXNS with misaligned SP is UNPREDICTABLE\n"); 6651 } 6652 6653 saved_psr = env->v7m.exception; 6654 if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) { 6655 saved_psr |= XPSR_SFPA; 6656 } 6657 6658 /* Note that these stores can throw exceptions on MPU faults */ 6659 cpu_stl_data(env, sp, nextinst); 6660 cpu_stl_data(env, sp + 4, saved_psr); 6661 6662 env->regs[13] = sp; 6663 env->regs[14] = 0xfeffffff; 6664 if (arm_v7m_is_handler_mode(env)) { 6665 /* Write a dummy value to IPSR, to avoid leaking the current secure 6666 * exception number to non-secure code. This is guaranteed not 6667 * to cause write_v7m_exception() to actually change stacks. 6668 */ 6669 write_v7m_exception(env, 1); 6670 } 6671 switch_v7m_security_state(env, 0); 6672 env->thumb = 1; 6673 env->regs[15] = dest; 6674 } 6675 6676 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, 6677 bool spsel) 6678 { 6679 /* Return a pointer to the location where we currently store the 6680 * stack pointer for the requested security state and thread mode. 6681 * This pointer will become invalid if the CPU state is updated 6682 * such that the stack pointers are switched around (eg changing 6683 * the SPSEL control bit). 6684 * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). 6685 * Unlike that pseudocode, we require the caller to pass us in the 6686 * SPSEL control bit value; this is because we also use this 6687 * function in handling of pushing of the callee-saves registers 6688 * part of the v8M stack frame (pseudocode PushCalleeStack()), 6689 * and in the tailchain codepath the SPSEL bit comes from the exception 6690 * return magic LR value from the previous exception. The pseudocode 6691 * opencodes the stack-selection in PushCalleeStack(), but we prefer 6692 * to make this utility function generic enough to do the job. 
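     *
     * For example: a call with secure == env->v7m.secure, threadmode == true
     * and spsel == true wants the PSP for the current security state; that
     * is &env->regs[13] if the PSP is currently live, and &env->v7m.other_sp
     * if it is not.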
6693 */ 6694 bool want_psp = threadmode && spsel; 6695 6696 if (secure == env->v7m.secure) { 6697 if (want_psp == v7m_using_psp(env)) { 6698 return &env->regs[13]; 6699 } else { 6700 return &env->v7m.other_sp; 6701 } 6702 } else { 6703 if (want_psp) { 6704 return &env->v7m.other_ss_psp; 6705 } else { 6706 return &env->v7m.other_ss_msp; 6707 } 6708 } 6709 } 6710 6711 static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, 6712 uint32_t *pvec) 6713 { 6714 CPUState *cs = CPU(cpu); 6715 CPUARMState *env = &cpu->env; 6716 MemTxResult result; 6717 uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4; 6718 uint32_t vector_entry; 6719 MemTxAttrs attrs = {}; 6720 ARMMMUIdx mmu_idx; 6721 bool exc_secure; 6722 6723 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true); 6724 6725 /* We don't do a get_phys_addr() here because the rules for vector 6726 * loads are special: they always use the default memory map, and 6727 * the default memory map permits reads from all addresses. 6728 * Since there's no easy way to pass through to pmsav8_mpu_lookup() 6729 * that we want this special case which would always say "yes", 6730 * we just do the SAU lookup here followed by a direct physical load. 6731 */ 6732 attrs.secure = targets_secure; 6733 attrs.user = false; 6734 6735 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6736 V8M_SAttributes sattrs = {}; 6737 6738 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); 6739 if (sattrs.ns) { 6740 attrs.secure = false; 6741 } else if (!targets_secure) { 6742 /* NS access to S memory */ 6743 goto load_fail; 6744 } 6745 } 6746 6747 vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr, 6748 attrs, &result); 6749 if (result != MEMTX_OK) { 6750 goto load_fail; 6751 } 6752 *pvec = vector_entry; 6753 return true; 6754 6755 load_fail: 6756 /* All vector table fetch fails are reported as HardFault, with 6757 * HFSR.VECTTBL and .FORCED set. (FORCED is set because 6758 * technically the underlying exception is a MemManage or BusFault 6759 * that is escalated to HardFault.) This is a terminal exception, 6760 * so we will either take the HardFault immediately or else enter 6761 * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()). 6762 */ 6763 exc_secure = targets_secure || 6764 !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK); 6765 env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK; 6766 armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure); 6767 return false; 6768 } 6769 6770 static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain, 6771 bool ignore_faults) 6772 { 6773 /* For v8M, push the callee-saves register part of the stack frame. 6774 * Compare the v8M pseudocode PushCalleeStack(). 6775 * In the tailchaining case this may not be the current stack. 
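     * The frame written here is 0x28 bytes: the integrity signature at
     * offset 0x0, a word at 0x4 which this function leaves unwritten, and
     * r4-r11 at offsets 0x8..0x24.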
6776 */ 6777 CPUARMState *env = &cpu->env; 6778 uint32_t *frame_sp_p; 6779 uint32_t frameptr; 6780 ARMMMUIdx mmu_idx; 6781 bool stacked_ok; 6782 6783 if (dotailchain) { 6784 bool mode = lr & R_V7M_EXCRET_MODE_MASK; 6785 bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) || 6786 !mode; 6787 6788 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv); 6789 frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode, 6790 lr & R_V7M_EXCRET_SPSEL_MASK); 6791 } else { 6792 mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 6793 frame_sp_p = &env->regs[13]; 6794 } 6795 6796 frameptr = *frame_sp_p - 0x28; 6797 6798 /* Write as much of the stack frame as we can. A write failure may 6799 * cause us to pend a derived exception. 6800 */ 6801 stacked_ok = 6802 v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) && 6803 v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, 6804 ignore_faults) && 6805 v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, 6806 ignore_faults) && 6807 v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, 6808 ignore_faults) && 6809 v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, 6810 ignore_faults) && 6811 v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, 6812 ignore_faults) && 6813 v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, 6814 ignore_faults) && 6815 v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, 6816 ignore_faults) && 6817 v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, 6818 ignore_faults); 6819 6820 /* Update SP regardless of whether any of the stack accesses failed. 6821 * When we implement v8M stack limit checking then this attempt to 6822 * update SP might also fail and result in a derived exception. 6823 */ 6824 *frame_sp_p = frameptr; 6825 6826 return !stacked_ok; 6827 } 6828 6829 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain, 6830 bool ignore_stackfaults) 6831 { 6832 /* Do the "take the exception" parts of exception entry, 6833 * but not the pushing of state to the stack. This is 6834 * similar to the pseudocode ExceptionTaken() function. 6835 */ 6836 CPUARMState *env = &cpu->env; 6837 uint32_t addr; 6838 bool targets_secure; 6839 int exc; 6840 bool push_failed = false; 6841 6842 armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure); 6843 6844 if (arm_feature(env, ARM_FEATURE_V8)) { 6845 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && 6846 (lr & R_V7M_EXCRET_S_MASK)) { 6847 /* The background code (the owner of the registers in the 6848 * exception frame) is Secure. This means it may either already 6849 * have or now needs to push callee-saves registers. 6850 */ 6851 if (targets_secure) { 6852 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) { 6853 /* We took an exception from Secure to NonSecure 6854 * (which means the callee-saved registers got stacked) 6855 * and are now tailchaining to a Secure exception. 6856 * Clear DCRS so eventual return from this Secure 6857 * exception unstacks the callee-saved registers. 6858 */ 6859 lr &= ~R_V7M_EXCRET_DCRS_MASK; 6860 } 6861 } else { 6862 /* We're going to a non-secure exception; push the 6863 * callee-saves registers to the stack now, if they're 6864 * not already saved. 
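                 * (EXCRET.DCRS == 0 records that the callee-saves registers
                 * were already stacked by an earlier Secure-to-NonSecure
                 * exception entry, so the push below is skipped in that case.)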
6865 */ 6866 if (lr & R_V7M_EXCRET_DCRS_MASK && 6867 !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) { 6868 push_failed = v7m_push_callee_stack(cpu, lr, dotailchain, 6869 ignore_stackfaults); 6870 } 6871 lr |= R_V7M_EXCRET_DCRS_MASK; 6872 } 6873 } 6874 6875 lr &= ~R_V7M_EXCRET_ES_MASK; 6876 if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6877 lr |= R_V7M_EXCRET_ES_MASK; 6878 } 6879 lr &= ~R_V7M_EXCRET_SPSEL_MASK; 6880 if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) { 6881 lr |= R_V7M_EXCRET_SPSEL_MASK; 6882 } 6883 6884 /* Clear registers if necessary to prevent non-secure exception 6885 * code being able to see register values from secure code. 6886 * Where register values become architecturally UNKNOWN we leave 6887 * them with their previous values. 6888 */ 6889 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6890 if (!targets_secure) { 6891 /* Always clear the caller-saved registers (they have been 6892 * pushed to the stack earlier in v7m_push_stack()). 6893 * Clear callee-saved registers if the background code is 6894 * Secure (in which case these regs were saved in 6895 * v7m_push_callee_stack()). 6896 */ 6897 int i; 6898 6899 for (i = 0; i < 13; i++) { 6900 /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */ 6901 if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) { 6902 env->regs[i] = 0; 6903 } 6904 } 6905 /* Clear EAPSR */ 6906 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT); 6907 } 6908 } 6909 } 6910 6911 if (push_failed && !ignore_stackfaults) { 6912 /* Derived exception on callee-saves register stacking: 6913 * we might now want to take a different exception which 6914 * targets a different security state, so try again from the top. 6915 */ 6916 v7m_exception_taken(cpu, lr, true, true); 6917 return; 6918 } 6919 6920 if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) { 6921 /* Vector load failed: derived exception */ 6922 v7m_exception_taken(cpu, lr, true, true); 6923 return; 6924 } 6925 6926 /* Now we've done everything that might cause a derived exception 6927 * we can go ahead and activate whichever exception we're going to 6928 * take (which might now be the derived exception). 6929 */ 6930 armv7m_nvic_acknowledge_irq(env->nvic); 6931 6932 /* Switch to target security state -- must do this before writing SPSEL */ 6933 switch_v7m_security_state(env, targets_secure); 6934 write_v7m_control_spsel(env, 0); 6935 arm_clear_exclusive(env); 6936 /* Clear IT bits */ 6937 env->condexec_bits = 0; 6938 env->regs[14] = lr; 6939 env->regs[15] = addr & 0xfffffffe; 6940 env->thumb = addr & 1; 6941 } 6942 6943 static bool v7m_push_stack(ARMCPU *cpu) 6944 { 6945 /* Do the "set up stack frame" part of exception entry, 6946 * similar to pseudocode PushStack(). 6947 * Return true if we generate a derived exception (and so 6948 * should ignore further stack faults trying to process 6949 * that derived exception.) 6950 */ 6951 bool stacked_ok; 6952 CPUARMState *env = &cpu->env; 6953 uint32_t xpsr = xpsr_read(env); 6954 uint32_t frameptr = env->regs[13]; 6955 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 6956 6957 /* Align stack pointer if the guest wants that */ 6958 if ((frameptr & 4) && 6959 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) { 6960 frameptr -= 4; 6961 xpsr |= XPSR_SPREALIGN; 6962 } 6963 6964 frameptr -= 0x20; 6965 6966 /* Write as much of the stack frame as we can. 
If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

    /* Update SP regardless of whether any of the stack accesses failed.
     * When we implement v8M stack limit checking then this attempt to
     * update SP might also fail and result in a derived exception.
     */
    env->regs[13] = frameptr;

    return !stacked_ok;
}

static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;

    /* If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /* In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = true;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /* Auto-clear FAULTMASK on return from other than NMI.
7054 * If the security extension is implemented then this only 7055 * happens if the raw execution priority is >= 0; the 7056 * value of the ES bit in the exception return value indicates 7057 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.) 7058 */ 7059 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 7060 exc_secure = excret & R_V7M_EXCRET_ES_MASK; 7061 if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) { 7062 env->v7m.faultmask[exc_secure] = 0; 7063 } 7064 } else { 7065 env->v7m.faultmask[M_REG_NS] = 0; 7066 } 7067 } 7068 7069 switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception, 7070 exc_secure)) { 7071 case -1: 7072 /* attempt to exit an exception that isn't active */ 7073 ufault = true; 7074 break; 7075 case 0: 7076 /* still an irq active now */ 7077 break; 7078 case 1: 7079 /* we returned to base exception level, no nesting. 7080 * (In the pseudocode this is written using "NestedActivation != 1" 7081 * where we have 'rettobase == false'.) 7082 */ 7083 rettobase = true; 7084 break; 7085 default: 7086 g_assert_not_reached(); 7087 } 7088 7089 return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK); 7090 return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK; 7091 return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && 7092 (excret & R_V7M_EXCRET_S_MASK); 7093 7094 if (arm_feature(env, ARM_FEATURE_V8)) { 7095 if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) { 7096 /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP); 7097 * we choose to take the UsageFault. 7098 */ 7099 if ((excret & R_V7M_EXCRET_S_MASK) || 7100 (excret & R_V7M_EXCRET_ES_MASK) || 7101 !(excret & R_V7M_EXCRET_DCRS_MASK)) { 7102 ufault = true; 7103 } 7104 } 7105 if (excret & R_V7M_EXCRET_RES0_MASK) { 7106 ufault = true; 7107 } 7108 } else { 7109 /* For v7M we only recognize certain combinations of the low bits */ 7110 switch (excret & 0xf) { 7111 case 1: /* Return to Handler */ 7112 break; 7113 case 13: /* Return to Thread using Process stack */ 7114 case 9: /* Return to Thread using Main stack */ 7115 /* We only need to check NONBASETHRDENA for v7M, because in 7116 * v8M this bit does not exist (it is RES1). 7117 */ 7118 if (!rettobase && 7119 !(env->v7m.ccr[env->v7m.secure] & 7120 R_V7M_CCR_NONBASETHRDENA_MASK)) { 7121 ufault = true; 7122 } 7123 break; 7124 default: 7125 ufault = true; 7126 } 7127 } 7128 7129 if (sfault) { 7130 env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK; 7131 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7132 v7m_exception_taken(cpu, excret, true, false); 7133 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " 7134 "stackframe: failed EXC_RETURN.ES validity check\n"); 7135 return; 7136 } 7137 7138 if (ufault) { 7139 /* Bad exception return: instead of popping the exception 7140 * stack, directly take a usage fault on the current stack. 7141 */ 7142 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7143 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7144 v7m_exception_taken(cpu, excret, true, false); 7145 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " 7146 "stackframe: failed exception return integrity check\n"); 7147 return; 7148 } 7149 7150 /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in 7151 * Handler mode (and will be until we write the new XPSR.Interrupt 7152 * field) this does not switch around the current stack pointer. 
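     * (CONTROL.SPSEL only selects the active stack pointer in Thread mode;
     * see v7m_using_psp() above.)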
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    switch_v7m_security_state(env, return_to_secure);

    {
        /* The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                              return_to_secure,
                                              !return_to_handler,
                                              return_to_sp_process);
        uint32_t frameptr = *frame_sp_p;
        bool pop_ok = true;
        ARMMMUIdx mmu_idx;
        bool return_to_priv = return_to_handler ||
            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
                                                        return_to_priv);

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t expected_sig = 0xfefa125b;
            uint32_t actual_sig;

            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);

            if (pop_ok && expected_sig != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                v7m_exception_taken(cpu, excret, true, false);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                return;
            }

            pop_ok = pop_ok &&
                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers */
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /* v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
7236 */ 7237 v7m_exception_taken(cpu, excret, true, false); 7238 return; 7239 } 7240 7241 /* Returning from an exception with a PC with bit 0 set is defined 7242 * behaviour on v8M (bit 0 is ignored), but for v7M it was specified 7243 * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore 7244 * the lsbit, and there are several RTOSes out there which incorrectly 7245 * assume the r15 in the stack frame should be a Thumb-style "lsbit 7246 * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but 7247 * complain about the badly behaved guest. 7248 */ 7249 if (env->regs[15] & 1) { 7250 env->regs[15] &= ~1U; 7251 if (!arm_feature(env, ARM_FEATURE_V8)) { 7252 qemu_log_mask(LOG_GUEST_ERROR, 7253 "M profile return from interrupt with misaligned " 7254 "PC is UNPREDICTABLE on v7M\n"); 7255 } 7256 } 7257 7258 if (arm_feature(env, ARM_FEATURE_V8)) { 7259 /* For v8M we have to check whether the xPSR exception field 7260 * matches the EXCRET value for return to handler/thread 7261 * before we commit to changing the SP and xPSR. 7262 */ 7263 bool will_be_handler = (xpsr & XPSR_EXCP) != 0; 7264 if (return_to_handler != will_be_handler) { 7265 /* Take an INVPC UsageFault on the current stack. 7266 * By this point we will have switched to the security state 7267 * for the background state, so this UsageFault will target 7268 * that state. 7269 */ 7270 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 7271 env->v7m.secure); 7272 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7273 v7m_exception_taken(cpu, excret, true, false); 7274 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " 7275 "stackframe: failed exception return integrity " 7276 "check\n"); 7277 return; 7278 } 7279 } 7280 7281 /* Commit to consuming the stack frame */ 7282 frameptr += 0x20; 7283 /* Undo stack alignment (the SPREALIGN bit indicates that the original 7284 * pre-exception SP was not 8-aligned and we added a padding word to 7285 * align it, so we undo this by ORing in the bit that increases it 7286 * from the current 8-aligned value to the 8-unaligned value. (Adding 4 7287 * would work too but a logical OR is how the pseudocode specifies it.) 7288 */ 7289 if (xpsr & XPSR_SPREALIGN) { 7290 frameptr |= 4; 7291 } 7292 *frame_sp_p = frameptr; 7293 } 7294 /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */ 7295 xpsr_write(env, xpsr, ~XPSR_SPREALIGN); 7296 7297 /* The restored xPSR exception field will be zero if we're 7298 * resuming in Thread mode. If that doesn't match what the 7299 * exception return excret specified then this is a UsageFault. 7300 * v7M requires we make this check here; v8M did it earlier. 7301 */ 7302 if (return_to_handler != arm_v7m_is_handler_mode(env)) { 7303 /* Take an INVPC UsageFault by pushing the stack again; 7304 * we know we're v7M so this is never a Secure UsageFault. 7305 */ 7306 bool ignore_stackfaults; 7307 7308 assert(!arm_feature(env, ARM_FEATURE_V8)); 7309 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); 7310 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7311 ignore_stackfaults = v7m_push_stack(cpu); 7312 v7m_exception_taken(cpu, excret, false, ignore_stackfaults); 7313 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: " 7314 "failed exception return integrity check\n"); 7315 return; 7316 } 7317 7318 /* Otherwise, we have a successful exception exit. 
*/ 7319 arm_clear_exclusive(env); 7320 qemu_log_mask(CPU_LOG_INT, "...successful exception return\n"); 7321 } 7322 7323 static bool do_v7m_function_return(ARMCPU *cpu) 7324 { 7325 /* v8M security extensions magic function return. 7326 * We may either: 7327 * (1) throw an exception (longjump) 7328 * (2) return true if we successfully handled the function return 7329 * (3) return false if we failed a consistency check and have 7330 * pended a UsageFault that needs to be taken now 7331 * 7332 * At this point the magic return value is split between env->regs[15] 7333 * and env->thumb. We don't bother to reconstitute it because we don't 7334 * need it (all values are handled the same way). 7335 */ 7336 CPUARMState *env = &cpu->env; 7337 uint32_t newpc, newpsr, newpsr_exc; 7338 7339 qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n"); 7340 7341 { 7342 bool threadmode, spsel; 7343 TCGMemOpIdx oi; 7344 ARMMMUIdx mmu_idx; 7345 uint32_t *frame_sp_p; 7346 uint32_t frameptr; 7347 7348 /* Pull the return address and IPSR from the Secure stack */ 7349 threadmode = !arm_v7m_is_handler_mode(env); 7350 spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK; 7351 7352 frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel); 7353 frameptr = *frame_sp_p; 7354 7355 /* These loads may throw an exception (for MPU faults). We want to 7356 * do them as secure, so work out what MMU index that is. 7357 */ 7358 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 7359 oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx)); 7360 newpc = helper_le_ldul_mmu(env, frameptr, oi, 0); 7361 newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0); 7362 7363 /* Consistency checks on new IPSR */ 7364 newpsr_exc = newpsr & XPSR_EXCP; 7365 if (!((env->v7m.exception == 0 && newpsr_exc == 0) || 7366 (env->v7m.exception == 1 && newpsr_exc != 0))) { 7367 /* Pend the fault and tell our caller to take it */ 7368 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7369 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 7370 env->v7m.secure); 7371 qemu_log_mask(CPU_LOG_INT, 7372 "...taking INVPC UsageFault: " 7373 "IPSR consistency check failed\n"); 7374 return false; 7375 } 7376 7377 *frame_sp_p = frameptr + 8; 7378 } 7379 7380 /* This invalidates frame_sp_p */ 7381 switch_v7m_security_state(env, true); 7382 env->v7m.exception = newpsr_exc; 7383 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; 7384 if (newpsr & XPSR_SFPA) { 7385 env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK; 7386 } 7387 xpsr_write(env, 0, XPSR_IT); 7388 env->thumb = newpc & 1; 7389 env->regs[15] = newpc & ~1; 7390 7391 qemu_log_mask(CPU_LOG_INT, "...function return successful\n"); 7392 return true; 7393 } 7394 7395 static void arm_log_exception(int idx) 7396 { 7397 if (qemu_loglevel_mask(CPU_LOG_INT)) { 7398 const char *exc = NULL; 7399 static const char * const excnames[] = { 7400 [EXCP_UDEF] = "Undefined Instruction", 7401 [EXCP_SWI] = "SVC", 7402 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 7403 [EXCP_DATA_ABORT] = "Data Abort", 7404 [EXCP_IRQ] = "IRQ", 7405 [EXCP_FIQ] = "FIQ", 7406 [EXCP_BKPT] = "Breakpoint", 7407 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 7408 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 7409 [EXCP_HVC] = "Hypervisor Call", 7410 [EXCP_HYP_TRAP] = "Hypervisor Trap", 7411 [EXCP_SMC] = "Secure Monitor Call", 7412 [EXCP_VIRQ] = "Virtual IRQ", 7413 [EXCP_VFIQ] = "Virtual FIQ", 7414 [EXCP_SEMIHOST] = "Semihosting call", 7415 [EXCP_NOCP] = "v7M NOCP UsageFault", 7416 [EXCP_INVSTATE] = "v7M 
INVSTATE UsageFault", 7417 }; 7418 7419 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 7420 exc = excnames[idx]; 7421 } 7422 if (!exc) { 7423 exc = "unknown"; 7424 } 7425 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); 7426 } 7427 } 7428 7429 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, 7430 uint32_t addr, uint16_t *insn) 7431 { 7432 /* Load a 16-bit portion of a v7M instruction, returning true on success, 7433 * or false on failure (in which case we will have pended the appropriate 7434 * exception). 7435 * We need to do the instruction fetch's MPU and SAU checks 7436 * like this because there is no MMU index that would allow 7437 * doing the load with a single function call. Instead we must 7438 * first check that the security attributes permit the load 7439 * and that they don't mismatch on the two halves of the instruction, 7440 * and then we do the load as a secure load (ie using the security 7441 * attributes of the address, not the CPU, as architecturally required). 7442 */ 7443 CPUState *cs = CPU(cpu); 7444 CPUARMState *env = &cpu->env; 7445 V8M_SAttributes sattrs = {}; 7446 MemTxAttrs attrs = {}; 7447 ARMMMUFaultInfo fi = {}; 7448 MemTxResult txres; 7449 target_ulong page_size; 7450 hwaddr physaddr; 7451 int prot; 7452 7453 v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs); 7454 if (!sattrs.nsc || sattrs.ns) { 7455 /* This must be the second half of the insn, and it straddles a 7456 * region boundary with the second half not being S&NSC. 7457 */ 7458 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7459 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7460 qemu_log_mask(CPU_LOG_INT, 7461 "...really SecureFault with SFSR.INVEP\n"); 7462 return false; 7463 } 7464 if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, 7465 &physaddr, &attrs, &prot, &page_size, &fi, NULL)) { 7466 /* the MPU lookup failed */ 7467 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 7468 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); 7469 qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n"); 7470 return false; 7471 } 7472 *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr, 7473 attrs, &txres); 7474 if (txres != MEMTX_OK) { 7475 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 7476 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 7477 qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n"); 7478 return false; 7479 } 7480 return true; 7481 } 7482 7483 static bool v7m_handle_execute_nsc(ARMCPU *cpu) 7484 { 7485 /* Check whether this attempt to execute code in a Secure & NS-Callable 7486 * memory region is for an SG instruction; if so, then emulate the 7487 * effect of the SG instruction and return true. Otherwise pend 7488 * the correct kind of exception and return false. 7489 */ 7490 CPUARMState *env = &cpu->env; 7491 ARMMMUIdx mmu_idx; 7492 uint16_t insn; 7493 7494 /* We should never get here unless get_phys_addr_pmsav8() caused 7495 * an exception for NS executing in S&NSC memory. 
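     * SG is encoded as 0xe97fe97f; both halfwords are identical, which is
     * why each halfword fetched below is compared against 0xe97f.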
7496 */ 7497 assert(!env->v7m.secure); 7498 assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 7499 7500 /* We want to do the MPU lookup as secure; work out what mmu_idx that is */ 7501 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 7502 7503 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) { 7504 return false; 7505 } 7506 7507 if (!env->thumb) { 7508 goto gen_invep; 7509 } 7510 7511 if (insn != 0xe97f) { 7512 /* Not an SG instruction first half (we choose the IMPDEF 7513 * early-SG-check option). 7514 */ 7515 goto gen_invep; 7516 } 7517 7518 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) { 7519 return false; 7520 } 7521 7522 if (insn != 0xe97f) { 7523 /* Not an SG instruction second half (yes, both halves of the SG 7524 * insn have the same hex value) 7525 */ 7526 goto gen_invep; 7527 } 7528 7529 /* OK, we have confirmed that we really have an SG instruction. 7530 * We know we're NS in S memory so don't need to repeat those checks. 7531 */ 7532 qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32 7533 ", executing it\n", env->regs[15]); 7534 env->regs[14] &= ~1; 7535 switch_v7m_security_state(env, true); 7536 xpsr_write(env, 0, XPSR_IT); 7537 env->regs[15] += 4; 7538 return true; 7539 7540 gen_invep: 7541 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7542 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7543 qemu_log_mask(CPU_LOG_INT, 7544 "...really SecureFault with SFSR.INVEP\n"); 7545 return false; 7546 } 7547 7548 void arm_v7m_cpu_do_interrupt(CPUState *cs) 7549 { 7550 ARMCPU *cpu = ARM_CPU(cs); 7551 CPUARMState *env = &cpu->env; 7552 uint32_t lr; 7553 bool ignore_stackfaults; 7554 7555 arm_log_exception(cs->exception_index); 7556 7557 /* For exceptions we just mark as pending on the NVIC, and let that 7558 handle it. */ 7559 switch (cs->exception_index) { 7560 case EXCP_UDEF: 7561 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7562 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK; 7563 break; 7564 case EXCP_NOCP: 7565 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7566 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK; 7567 break; 7568 case EXCP_INVSTATE: 7569 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7570 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK; 7571 break; 7572 case EXCP_SWI: 7573 /* The PC already points to the next instruction. */ 7574 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure); 7575 break; 7576 case EXCP_PREFETCH_ABORT: 7577 case EXCP_DATA_ABORT: 7578 /* Note that for M profile we don't have a guest facing FSR, but 7579 * the env->exception.fsr will be populated by the code that 7580 * raises the fault, in the A profile short-descriptor format. 7581 */ 7582 switch (env->exception.fsr & 0xf) { 7583 case M_FAKE_FSR_NSC_EXEC: 7584 /* Exception generated when we try to execute code at an address 7585 * which is marked as Secure & Non-Secure Callable and the CPU 7586 * is in the Non-Secure state. The only instruction which can 7587 * be executed like this is SG (and that only if both halves of 7588 * the SG instruction have the same security attributes.) 7589 * Everything else must generate an INVEP SecureFault, so we 7590 * emulate the SG instruction here. 7591 */ 7592 if (v7m_handle_execute_nsc(cpu)) { 7593 return; 7594 } 7595 break; 7596 case M_FAKE_FSR_SFAULT: 7597 /* Various flavours of SecureFault for attempts to execute or 7598 * access data in the wrong security state. 
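             * (M_FAKE_FSR_SFAULT, like M_FAKE_FSR_NSC_EXEC above, is a
             * QEMU-internal FSR encoding: M profile has no guest-visible
             * FSR, so these values exist only to route the fault here.)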
7599 */ 7600 switch (cs->exception_index) { 7601 case EXCP_PREFETCH_ABORT: 7602 if (env->v7m.secure) { 7603 env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK; 7604 qemu_log_mask(CPU_LOG_INT, 7605 "...really SecureFault with SFSR.INVTRAN\n"); 7606 } else { 7607 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7608 qemu_log_mask(CPU_LOG_INT, 7609 "...really SecureFault with SFSR.INVEP\n"); 7610 } 7611 break; 7612 case EXCP_DATA_ABORT: 7613 /* This must be an NS access to S memory */ 7614 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK; 7615 qemu_log_mask(CPU_LOG_INT, 7616 "...really SecureFault with SFSR.AUVIOL\n"); 7617 break; 7618 } 7619 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7620 break; 7621 case 0x8: /* External Abort */ 7622 switch (cs->exception_index) { 7623 case EXCP_PREFETCH_ABORT: 7624 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 7625 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n"); 7626 break; 7627 case EXCP_DATA_ABORT: 7628 env->v7m.cfsr[M_REG_NS] |= 7629 (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK); 7630 env->v7m.bfar = env->exception.vaddress; 7631 qemu_log_mask(CPU_LOG_INT, 7632 "...with CFSR.PRECISERR and BFAR 0x%x\n", 7633 env->v7m.bfar); 7634 break; 7635 } 7636 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 7637 break; 7638 default: 7639 /* All other FSR values are either MPU faults or "can't happen 7640 * for M profile" cases. 7641 */ 7642 switch (cs->exception_index) { 7643 case EXCP_PREFETCH_ABORT: 7644 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 7645 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n"); 7646 break; 7647 case EXCP_DATA_ABORT: 7648 env->v7m.cfsr[env->v7m.secure] |= 7649 (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK); 7650 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress; 7651 qemu_log_mask(CPU_LOG_INT, 7652 "...with CFSR.DACCVIOL and MMFAR 0x%x\n", 7653 env->v7m.mmfar[env->v7m.secure]); 7654 break; 7655 } 7656 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, 7657 env->v7m.secure); 7658 break; 7659 } 7660 break; 7661 case EXCP_BKPT: 7662 if (semihosting_enabled()) { 7663 int nr; 7664 nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff; 7665 if (nr == 0xab) { 7666 env->regs[15] += 2; 7667 qemu_log_mask(CPU_LOG_INT, 7668 "...handling as semihosting call 0x%x\n", 7669 env->regs[0]); 7670 env->regs[0] = do_arm_semihosting(env); 7671 return; 7672 } 7673 } 7674 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false); 7675 break; 7676 case EXCP_IRQ: 7677 break; 7678 case EXCP_EXCEPTION_EXIT: 7679 if (env->regs[15] < EXC_RETURN_MIN_MAGIC) { 7680 /* Must be v8M security extension function return */ 7681 assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC); 7682 assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 7683 if (do_v7m_function_return(cpu)) { 7684 return; 7685 } 7686 } else { 7687 do_v7m_exception_exit(cpu); 7688 return; 7689 } 7690 break; 7691 default: 7692 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 7693 return; /* Never happens. Keep compiler happy. */ 7694 } 7695 7696 if (arm_feature(env, ARM_FEATURE_V8)) { 7697 lr = R_V7M_EXCRET_RES1_MASK | 7698 R_V7M_EXCRET_DCRS_MASK | 7699 R_V7M_EXCRET_FTYPE_MASK; 7700 /* The S bit indicates whether we should return to Secure 7701 * or NonSecure (ie our current state). 7702 * The ES bit indicates whether we're taking this exception 7703 * to Secure or NonSecure (ie our target state). We set it 7704 * later, in v7m_exception_taken(). 7705 * The SPSEL bit is also set in v7m_exception_taken() for v8M. 
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
    qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}

/* Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /* Registers x13-x23 are the various mode SP and LR registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
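     *
     * The mapping used below is:
     *  x13, x14 <- SP_usr/sys, LR_usr/sys
     *  x15      <- SP_hyp
     *  x16, x17 <- LR_irq, SP_irq
     *  x18, x19 <- LR_svc, SP_svc
     *  x20, x21 <- LR_abt, SP_abt
     *  x22, x23 <- LR_und, SP_und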
7763 */ 7764 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 7765 env->xregs[13] = env->regs[13]; 7766 env->xregs[14] = env->regs[14]; 7767 } else { 7768 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 7769 /* HYP is an exception in that it is copied from r14 */ 7770 if (mode == ARM_CPU_MODE_HYP) { 7771 env->xregs[14] = env->regs[14]; 7772 } else { 7773 env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)]; 7774 } 7775 } 7776 7777 if (mode == ARM_CPU_MODE_HYP) { 7778 env->xregs[15] = env->regs[13]; 7779 } else { 7780 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 7781 } 7782 7783 if (mode == ARM_CPU_MODE_IRQ) { 7784 env->xregs[16] = env->regs[14]; 7785 env->xregs[17] = env->regs[13]; 7786 } else { 7787 env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)]; 7788 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 7789 } 7790 7791 if (mode == ARM_CPU_MODE_SVC) { 7792 env->xregs[18] = env->regs[14]; 7793 env->xregs[19] = env->regs[13]; 7794 } else { 7795 env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)]; 7796 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 7797 } 7798 7799 if (mode == ARM_CPU_MODE_ABT) { 7800 env->xregs[20] = env->regs[14]; 7801 env->xregs[21] = env->regs[13]; 7802 } else { 7803 env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)]; 7804 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 7805 } 7806 7807 if (mode == ARM_CPU_MODE_UND) { 7808 env->xregs[22] = env->regs[14]; 7809 env->xregs[23] = env->regs[13]; 7810 } else { 7811 env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)]; 7812 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 7813 } 7814 7815 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 7816 * mode, then we can copy from r8-r14. Otherwise, we copy from the 7817 * FIQ bank for r8-r14. 7818 */ 7819 if (mode == ARM_CPU_MODE_FIQ) { 7820 for (i = 24; i < 31; i++) { 7821 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 7822 } 7823 } else { 7824 for (i = 24; i < 29; i++) { 7825 env->xregs[i] = env->fiq_regs[i - 24]; 7826 } 7827 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 7828 env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)]; 7829 } 7830 7831 env->pc = env->regs[15]; 7832 } 7833 7834 /* Function used to synchronize QEMU's AArch32 register set with AArch64 7835 * register set. This is necessary when switching between AArch32 and AArch64 7836 * execution state. 7837 */ 7838 void aarch64_sync_64_to_32(CPUARMState *env) 7839 { 7840 int i; 7841 uint32_t mode = env->uncached_cpsr & CPSR_M; 7842 7843 /* We can blanket copy X[0:7] to R[0:7] */ 7844 for (i = 0; i < 8; i++) { 7845 env->regs[i] = env->xregs[i]; 7846 } 7847 7848 /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 7849 * Otherwise, we copy x8-x12 into the banked user regs. 7850 */ 7851 if (mode == ARM_CPU_MODE_FIQ) { 7852 for (i = 8; i < 13; i++) { 7853 env->usr_regs[i - 8] = env->xregs[i]; 7854 } 7855 } else { 7856 for (i = 8; i < 13; i++) { 7857 env->regs[i] = env->xregs[i]; 7858 } 7859 } 7860 7861 /* Registers r13 & r14 depend on the current mode. 7862 * If we are in a given mode, we copy the corresponding x registers to r13 7863 * and r14. Otherwise, we copy the x register to the banked r13 and r14 7864 * for the mode. 
7865 */ 7866 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 7867 env->regs[13] = env->xregs[13]; 7868 env->regs[14] = env->xregs[14]; 7869 } else { 7870 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 7871 7872 /* HYP is an exception in that it does not have its own banked r14 but 7873 * shares the USR r14 7874 */ 7875 if (mode == ARM_CPU_MODE_HYP) { 7876 env->regs[14] = env->xregs[14]; 7877 } else { 7878 env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 7879 } 7880 } 7881 7882 if (mode == ARM_CPU_MODE_HYP) { 7883 env->regs[13] = env->xregs[15]; 7884 } else { 7885 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 7886 } 7887 7888 if (mode == ARM_CPU_MODE_IRQ) { 7889 env->regs[14] = env->xregs[16]; 7890 env->regs[13] = env->xregs[17]; 7891 } else { 7892 env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 7893 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 7894 } 7895 7896 if (mode == ARM_CPU_MODE_SVC) { 7897 env->regs[14] = env->xregs[18]; 7898 env->regs[13] = env->xregs[19]; 7899 } else { 7900 env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 7901 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 7902 } 7903 7904 if (mode == ARM_CPU_MODE_ABT) { 7905 env->regs[14] = env->xregs[20]; 7906 env->regs[13] = env->xregs[21]; 7907 } else { 7908 env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 7909 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 7910 } 7911 7912 if (mode == ARM_CPU_MODE_UND) { 7913 env->regs[14] = env->xregs[22]; 7914 env->regs[13] = env->xregs[23]; 7915 } else { 7916 env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 7917 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 7918 } 7919 7920 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 7921 * mode, then we can copy to r8-r14. Otherwise, we copy to the 7922 * FIQ bank for r8-r14. 7923 */ 7924 if (mode == ARM_CPU_MODE_FIQ) { 7925 for (i = 24; i < 31; i++) { 7926 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 7927 } 7928 } else { 7929 for (i = 24; i < 29; i++) { 7930 env->fiq_regs[i - 24] = env->xregs[i]; 7931 } 7932 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 7933 env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 7934 } 7935 7936 env->regs[15] = env->pc; 7937 } 7938 7939 static void arm_cpu_do_interrupt_aarch32(CPUState *cs) 7940 { 7941 ARMCPU *cpu = ARM_CPU(cs); 7942 CPUARMState *env = &cpu->env; 7943 uint32_t addr; 7944 uint32_t mask; 7945 int new_mode; 7946 uint32_t offset; 7947 uint32_t moe; 7948 7949 /* If this is a debug exception we must update the DBGDSCR.MOE bits */ 7950 switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) { 7951 case EC_BREAKPOINT: 7952 case EC_BREAKPOINT_SAME_EL: 7953 moe = 1; 7954 break; 7955 case EC_WATCHPOINT: 7956 case EC_WATCHPOINT_SAME_EL: 7957 moe = 10; 7958 break; 7959 case EC_AA32_BKPT: 7960 moe = 3; 7961 break; 7962 case EC_VECTORCATCH: 7963 moe = 5; 7964 break; 7965 default: 7966 moe = 0; 7967 break; 7968 } 7969 7970 if (moe) { 7971 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 7972 } 7973 7974 /* TODO: Vectored interrupt controller. 
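     * Vector offsets used below: 0x04 Undefined, 0x08 SVC (and SMC, via
     * MVBAR), 0x0c Prefetch Abort, 0x10 Data Abort, 0x18 IRQ, 0x1c FIQ.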
*/ 7975 switch (cs->exception_index) { 7976 case EXCP_UDEF: 7977 new_mode = ARM_CPU_MODE_UND; 7978 addr = 0x04; 7979 mask = CPSR_I; 7980 if (env->thumb) 7981 offset = 2; 7982 else 7983 offset = 4; 7984 break; 7985 case EXCP_SWI: 7986 new_mode = ARM_CPU_MODE_SVC; 7987 addr = 0x08; 7988 mask = CPSR_I; 7989 /* The PC already points to the next instruction. */ 7990 offset = 0; 7991 break; 7992 case EXCP_BKPT: 7993 /* Fall through to prefetch abort. */ 7994 case EXCP_PREFETCH_ABORT: 7995 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 7996 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 7997 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 7998 env->exception.fsr, (uint32_t)env->exception.vaddress); 7999 new_mode = ARM_CPU_MODE_ABT; 8000 addr = 0x0c; 8001 mask = CPSR_A | CPSR_I; 8002 offset = 4; 8003 break; 8004 case EXCP_DATA_ABORT: 8005 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 8006 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 8007 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 8008 env->exception.fsr, 8009 (uint32_t)env->exception.vaddress); 8010 new_mode = ARM_CPU_MODE_ABT; 8011 addr = 0x10; 8012 mask = CPSR_A | CPSR_I; 8013 offset = 8; 8014 break; 8015 case EXCP_IRQ: 8016 new_mode = ARM_CPU_MODE_IRQ; 8017 addr = 0x18; 8018 /* Disable IRQ and imprecise data aborts. */ 8019 mask = CPSR_A | CPSR_I; 8020 offset = 4; 8021 if (env->cp15.scr_el3 & SCR_IRQ) { 8022 /* IRQ routed to monitor mode */ 8023 new_mode = ARM_CPU_MODE_MON; 8024 mask |= CPSR_F; 8025 } 8026 break; 8027 case EXCP_FIQ: 8028 new_mode = ARM_CPU_MODE_FIQ; 8029 addr = 0x1c; 8030 /* Disable FIQ, IRQ and imprecise data aborts. */ 8031 mask = CPSR_A | CPSR_I | CPSR_F; 8032 if (env->cp15.scr_el3 & SCR_FIQ) { 8033 /* FIQ routed to monitor mode */ 8034 new_mode = ARM_CPU_MODE_MON; 8035 } 8036 offset = 4; 8037 break; 8038 case EXCP_VIRQ: 8039 new_mode = ARM_CPU_MODE_IRQ; 8040 addr = 0x18; 8041 /* Disable IRQ and imprecise data aborts. */ 8042 mask = CPSR_A | CPSR_I; 8043 offset = 4; 8044 break; 8045 case EXCP_VFIQ: 8046 new_mode = ARM_CPU_MODE_FIQ; 8047 addr = 0x1c; 8048 /* Disable FIQ, IRQ and imprecise data aborts. */ 8049 mask = CPSR_A | CPSR_I | CPSR_F; 8050 offset = 4; 8051 break; 8052 case EXCP_SMC: 8053 new_mode = ARM_CPU_MODE_MON; 8054 addr = 0x08; 8055 mask = CPSR_A | CPSR_I | CPSR_F; 8056 offset = 0; 8057 break; 8058 default: 8059 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 8060 return; /* Never happens. Keep compiler happy. */ 8061 } 8062 8063 if (new_mode == ARM_CPU_MODE_MON) { 8064 addr += env->cp15.mvbar; 8065 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 8066 /* High vectors. When enabled, base address cannot be remapped. */ 8067 addr += 0xffff0000; 8068 } else { 8069 /* ARM v7 architectures provide a vector base address register to remap 8070 * the interrupt vector table. 8071 * This register is only followed in non-monitor mode, and is banked. 8072 * Note: only bits 31:5 are valid. 8073 */ 8074 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 8075 } 8076 8077 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 8078 env->cp15.scr_el3 &= ~SCR_NS; 8079 } 8080 8081 switch_mode (env, new_mode); 8082 /* For exceptions taken to AArch32 we must clear the SS bit in both 8083 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 8084 */ 8085 env->uncached_cpsr &= ~PSTATE_SS; 8086 env->spsr = cpsr_read(env); 8087 /* Clear IT bits. 
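*
* (If the exception interrupted a Thumb IT block, stale ITSTATE would
* wrongly conditionalise the first instructions of the handler, so the
* architecture requires ITSTATE to be reset to zero on exception entry;
* clearing condexec_bits below models exactly that.)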
*/
8088 env->condexec_bits = 0;
8089 /* Switch to the new mode, and to the correct instruction set. */
8090 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
8091 /* Set new mode endianness */
8092 env->uncached_cpsr &= ~CPSR_E;
8093 if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
8094 env->uncached_cpsr |= CPSR_E;
8095 }
8096 env->daif |= mask;
8097 /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
8098 * and we should just guard the thumb mode on V4 */
8099 if (arm_feature(env, ARM_FEATURE_V4T)) {
8100 env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
8101 }
8102 env->regs[14] = env->regs[15] + offset;
8103 env->regs[15] = addr;
8104 }
8105
8106 /* Handle exception entry to a target EL which is using AArch64 */
8107 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
8108 {
8109 ARMCPU *cpu = ARM_CPU(cs);
8110 CPUARMState *env = &cpu->env;
8111 unsigned int new_el = env->exception.target_el;
8112 target_ulong addr = env->cp15.vbar_el[new_el];
8113 unsigned int new_mode = aarch64_pstate_mode(new_el, true);
8114
8115 if (arm_current_el(env) < new_el) {
8116 /* Entry vector offset depends on whether the implemented EL
8117 * immediately lower than the target level is using AArch32 or AArch64
8118 */
8119 bool is_aa64;
8120
8121 switch (new_el) {
8122 case 3:
8123 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
8124 break;
8125 case 2:
8126 is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
8127 break;
8128 case 1:
8129 is_aa64 = is_a64(env);
8130 break;
8131 default:
8132 g_assert_not_reached();
8133 }
8134
8135 if (is_aa64) {
8136 addr += 0x400;
8137 } else {
8138 addr += 0x600;
8139 }
8140 } else if (pstate_read(env) & PSTATE_SP) {
8141 addr += 0x200;
8142 }
8143
8144 switch (cs->exception_index) {
8145 case EXCP_PREFETCH_ABORT:
8146 case EXCP_DATA_ABORT:
8147 env->cp15.far_el[new_el] = env->exception.vaddress;
8148 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
8149 env->cp15.far_el[new_el]);
8150 /* fall through */
8151 case EXCP_BKPT:
8152 case EXCP_UDEF:
8153 case EXCP_SWI:
8154 case EXCP_HVC:
8155 case EXCP_HYP_TRAP:
8156 case EXCP_SMC:
8157 env->cp15.esr_el[new_el] = env->exception.syndrome;
8158 break;
8159 case EXCP_IRQ:
8160 case EXCP_VIRQ:
8161 addr += 0x80;
8162 break;
8163 case EXCP_FIQ:
8164 case EXCP_VFIQ:
8165 addr += 0x100;
8166 break;
8167 case EXCP_SEMIHOST:
8168 qemu_log_mask(CPU_LOG_INT,
8169 "...handling as semihosting call 0x%" PRIx64 "\n",
8170 env->xregs[0]);
8171 env->xregs[0] = do_arm_semihosting(env);
8172 return;
8173 default:
8174 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8175 }
8176
8177 if (is_a64(env)) {
8178 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
8179 aarch64_save_sp(env, arm_current_el(env));
8180 env->elr_el[new_el] = env->pc;
8181 } else {
8182 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
8183 env->elr_el[new_el] = env->regs[15];
8184
8185 aarch64_sync_32_to_64(env);
8186
8187 env->condexec_bits = 0;
8188 }
8189 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
8190 env->elr_el[new_el]);
8191
8192 pstate_write(env, PSTATE_DAIF | new_mode);
8193 env->aarch64 = 1;
8194 aarch64_restore_sp(env, new_el);
8195
8196 env->pc = addr;
8197
8198 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
8199 new_el, env->pc, pstate_read(env));
8200 }
8201
8202 static inline bool check_for_semihosting(CPUState *cs)
8203 {
8204 /* Check whether this exception is a semihosting call; if so
8205 * then
handle it and return true; otherwise return false.
8206 */
8207 ARMCPU *cpu = ARM_CPU(cs);
8208 CPUARMState *env = &cpu->env;
8209
8210 if (is_a64(env)) {
8211 if (cs->exception_index == EXCP_SEMIHOST) {
8212 /* This is always the 64-bit semihosting exception.
8213 * The "is this usermode" and "is semihosting enabled"
8214 * checks have been done at translate time.
8215 */
8216 qemu_log_mask(CPU_LOG_INT,
8217 "...handling as semihosting call 0x%" PRIx64 "\n",
8218 env->xregs[0]);
8219 env->xregs[0] = do_arm_semihosting(env);
8220 return true;
8221 }
8222 return false;
8223 } else {
8224 uint32_t imm;
8225
8226 /* Only intercept calls from privileged modes, to provide some
8227 * semblance of security.
8228 */
8229 if (cs->exception_index != EXCP_SEMIHOST &&
8230 (!semihosting_enabled() ||
8231 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
8232 return false;
8233 }
8234
8235 switch (cs->exception_index) {
8236 case EXCP_SEMIHOST:
8237 /* This is always a semihosting call; the "is this usermode"
8238 * and "is semihosting enabled" checks have been done at
8239 * translate time.
8240 */
8241 break;
8242 case EXCP_SWI:
8243 /* Check for semihosting interrupt. */
8244 if (env->thumb) {
8245 imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
8246 & 0xff;
8247 if (imm == 0xab) {
8248 break;
8249 }
8250 } else {
8251 imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
8252 & 0xffffff;
8253 if (imm == 0x123456) {
8254 break;
8255 }
8256 }
8257 return false;
8258 case EXCP_BKPT:
8259 /* See if this is a semihosting syscall. */
8260 if (env->thumb) {
8261 imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
8262 & 0xff;
8263 if (imm == 0xab) {
8264 env->regs[15] += 2;
8265 break;
8266 }
8267 }
8268 return false;
8269 default:
8270 return false;
8271 }
8272
8273 qemu_log_mask(CPU_LOG_INT,
8274 "...handling as semihosting call 0x%x\n",
8275 env->regs[0]);
8276 env->regs[0] = do_arm_semihosting(env);
8277 return true;
8278 }
8279 }
8280
8281 /* Handle a CPU exception for A and R profile CPUs.
8282 * Do any appropriate logging, handle PSCI calls, and then hand off
8283 * to the AArch64-entry or AArch32-entry function depending on the
8284 * target exception level's register width.
8285 */
8286 void arm_cpu_do_interrupt(CPUState *cs)
8287 {
8288 ARMCPU *cpu = ARM_CPU(cs);
8289 CPUARMState *env = &cpu->env;
8290 unsigned int new_el = env->exception.target_el;
8291
8292 assert(!arm_feature(env, ARM_FEATURE_M));
8293
8294 arm_log_exception(cs->exception_index);
8295 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
8296 new_el);
8297 if (qemu_loglevel_mask(CPU_LOG_INT)
8298 && !excp_is_internal(cs->exception_index)) {
8299 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
8300 env->exception.syndrome >> ARM_EL_EC_SHIFT,
8301 env->exception.syndrome);
8302 }
8303
8304 if (arm_is_psci_call(cpu, cs->exception_index)) {
8305 arm_handle_psci_call(cpu);
8306 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
8307 return;
8308 }
8309
8310 /* Semihosting semantics depend on the register width of the
8311 * code that caused the exception, not the target exception level,
8312 * so must be handled here.
8313 */
8314 if (check_for_semihosting(cs)) {
8315 return;
8316 }
8317
8318 /* Hooks may change global state, so the BQL should be held; it also
8319 * needs to be held for any modification of
8320 * cs->interrupt_request.
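*
* (For instance, arm_cpu_set_irq(), the inbound IRQ/FIQ line handler,
* also manipulates cs->interrupt_request and likewise runs under the
* BQL; that is an observation about the surrounding QEMU code, not
* something this function enforces beyond the assertion below.)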
8321 */ 8322 g_assert(qemu_mutex_iothread_locked()); 8323 8324 arm_call_pre_el_change_hook(cpu); 8325 8326 assert(!excp_is_internal(cs->exception_index)); 8327 if (arm_el_is_aa64(env, new_el)) { 8328 arm_cpu_do_interrupt_aarch64(cs); 8329 } else { 8330 arm_cpu_do_interrupt_aarch32(cs); 8331 } 8332 8333 arm_call_el_change_hook(cpu); 8334 8335 if (!kvm_enabled()) { 8336 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; 8337 } 8338 } 8339 8340 /* Return the exception level which controls this address translation regime */ 8341 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) 8342 { 8343 switch (mmu_idx) { 8344 case ARMMMUIdx_S2NS: 8345 case ARMMMUIdx_S1E2: 8346 return 2; 8347 case ARMMMUIdx_S1E3: 8348 return 3; 8349 case ARMMMUIdx_S1SE0: 8350 return arm_el_is_aa64(env, 3) ? 1 : 3; 8351 case ARMMMUIdx_S1SE1: 8352 case ARMMMUIdx_S1NSE0: 8353 case ARMMMUIdx_S1NSE1: 8354 case ARMMMUIdx_MPrivNegPri: 8355 case ARMMMUIdx_MUserNegPri: 8356 case ARMMMUIdx_MPriv: 8357 case ARMMMUIdx_MUser: 8358 case ARMMMUIdx_MSPrivNegPri: 8359 case ARMMMUIdx_MSUserNegPri: 8360 case ARMMMUIdx_MSPriv: 8361 case ARMMMUIdx_MSUser: 8362 return 1; 8363 default: 8364 g_assert_not_reached(); 8365 } 8366 } 8367 8368 /* Return the SCTLR value which controls this address translation regime */ 8369 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 8370 { 8371 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 8372 } 8373 8374 /* Return true if the specified stage of address translation is disabled */ 8375 static inline bool regime_translation_disabled(CPUARMState *env, 8376 ARMMMUIdx mmu_idx) 8377 { 8378 if (arm_feature(env, ARM_FEATURE_M)) { 8379 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & 8380 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 8381 case R_V7M_MPU_CTRL_ENABLE_MASK: 8382 /* Enabled, but not for HardFault and NMI */ 8383 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 8384 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 8385 /* Enabled for all cases */ 8386 return false; 8387 case 0: 8388 default: 8389 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 8390 * we warned about that in armv7m_nvic.c when the guest set it. 8391 */ 8392 return true; 8393 } 8394 } 8395 8396 if (mmu_idx == ARMMMUIdx_S2NS) { 8397 return (env->cp15.hcr_el2 & HCR_VM) == 0; 8398 } 8399 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 8400 } 8401 8402 static inline bool regime_translation_big_endian(CPUARMState *env, 8403 ARMMMUIdx mmu_idx) 8404 { 8405 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 8406 } 8407 8408 /* Return the TCR controlling this translation regime */ 8409 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) 8410 { 8411 if (mmu_idx == ARMMMUIdx_S2NS) { 8412 return &env->cp15.vtcr_el2; 8413 } 8414 return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; 8415 } 8416 8417 /* Convert a possible stage1+2 MMU index into the appropriate 8418 * stage 1 MMU index 8419 */ 8420 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 8421 { 8422 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 8423 mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0); 8424 } 8425 return mmu_idx; 8426 } 8427 8428 /* Returns TBI0 value for current regime el */ 8429 uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx) 8430 { 8431 TCR *tcr; 8432 uint32_t el; 8433 8434 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert 8435 * a stage 1+2 mmu index into the appropriate stage 1 mmu index. 
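*
* (e.g. ARMMMUIdx_S12NSE0 is converted to ARMMMUIdx_S1NSE0 below; that
* regime's EL is 1, so TBI0 is read from TCR_EL1 bit 37 rather than
* from bit 20, where the single TBI bit lives in the EL2/EL3 TCR
* layout.)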
8436 */ 8437 mmu_idx = stage_1_mmu_idx(mmu_idx); 8438 8439 tcr = regime_tcr(env, mmu_idx); 8440 el = regime_el(env, mmu_idx); 8441 8442 if (el > 1) { 8443 return extract64(tcr->raw_tcr, 20, 1); 8444 } else { 8445 return extract64(tcr->raw_tcr, 37, 1); 8446 } 8447 } 8448 8449 /* Returns TBI1 value for current regime el */ 8450 uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx) 8451 { 8452 TCR *tcr; 8453 uint32_t el; 8454 8455 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert 8456 * a stage 1+2 mmu index into the appropriate stage 1 mmu index. 8457 */ 8458 mmu_idx = stage_1_mmu_idx(mmu_idx); 8459 8460 tcr = regime_tcr(env, mmu_idx); 8461 el = regime_el(env, mmu_idx); 8462 8463 if (el > 1) { 8464 return 0; 8465 } else { 8466 return extract64(tcr->raw_tcr, 38, 1); 8467 } 8468 } 8469 8470 /* Return the TTBR associated with this translation regime */ 8471 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, 8472 int ttbrn) 8473 { 8474 if (mmu_idx == ARMMMUIdx_S2NS) { 8475 return env->cp15.vttbr_el2; 8476 } 8477 if (ttbrn == 0) { 8478 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 8479 } else { 8480 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 8481 } 8482 } 8483 8484 /* Return true if the translation regime is using LPAE format page tables */ 8485 static inline bool regime_using_lpae_format(CPUARMState *env, 8486 ARMMMUIdx mmu_idx) 8487 { 8488 int el = regime_el(env, mmu_idx); 8489 if (el == 2 || arm_el_is_aa64(env, el)) { 8490 return true; 8491 } 8492 if (arm_feature(env, ARM_FEATURE_LPAE) 8493 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { 8494 return true; 8495 } 8496 return false; 8497 } 8498 8499 /* Returns true if the stage 1 translation regime is using LPAE format page 8500 * tables. Used when raising alignment exceptions, whose FSR changes depending 8501 * on whether the long or short descriptor format is in use. */ 8502 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 8503 { 8504 mmu_idx = stage_1_mmu_idx(mmu_idx); 8505 8506 return regime_using_lpae_format(env, mmu_idx); 8507 } 8508 8509 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 8510 { 8511 switch (mmu_idx) { 8512 case ARMMMUIdx_S1SE0: 8513 case ARMMMUIdx_S1NSE0: 8514 case ARMMMUIdx_MUser: 8515 case ARMMMUIdx_MSUser: 8516 case ARMMMUIdx_MUserNegPri: 8517 case ARMMMUIdx_MSUserNegPri: 8518 return true; 8519 default: 8520 return false; 8521 case ARMMMUIdx_S12NSE0: 8522 case ARMMMUIdx_S12NSE1: 8523 g_assert_not_reached(); 8524 } 8525 } 8526 8527 /* Translate section/page access permissions to page 8528 * R/W protection flags 8529 * 8530 * @env: CPUARMState 8531 * @mmu_idx: MMU index indicating required translation regime 8532 * @ap: The 3-bit access permissions (AP[2:0]) 8533 * @domain_prot: The 2-bit domain access permissions 8534 */ 8535 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 8536 int ap, int domain_prot) 8537 { 8538 bool is_user = regime_is_user(env, mmu_idx); 8539 8540 if (domain_prot == 3) { 8541 return PAGE_READ | PAGE_WRITE; 8542 } 8543 8544 switch (ap) { 8545 case 0: 8546 if (arm_feature(env, ARM_FEATURE_V7)) { 8547 return 0; 8548 } 8549 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 8550 case SCTLR_S: 8551 return is_user ? 0 : PAGE_READ; 8552 case SCTLR_R: 8553 return PAGE_READ; 8554 default: 8555 return 0; 8556 } 8557 case 1: 8558 return is_user ? 
0 : PAGE_READ | PAGE_WRITE; 8559 case 2: 8560 if (is_user) { 8561 return PAGE_READ; 8562 } else { 8563 return PAGE_READ | PAGE_WRITE; 8564 } 8565 case 3: 8566 return PAGE_READ | PAGE_WRITE; 8567 case 4: /* Reserved. */ 8568 return 0; 8569 case 5: 8570 return is_user ? 0 : PAGE_READ; 8571 case 6: 8572 return PAGE_READ; 8573 case 7: 8574 if (!arm_feature(env, ARM_FEATURE_V6K)) { 8575 return 0; 8576 } 8577 return PAGE_READ; 8578 default: 8579 g_assert_not_reached(); 8580 } 8581 } 8582 8583 /* Translate section/page access permissions to page 8584 * R/W protection flags. 8585 * 8586 * @ap: The 2-bit simple AP (AP[2:1]) 8587 * @is_user: TRUE if accessing from PL0 8588 */ 8589 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 8590 { 8591 switch (ap) { 8592 case 0: 8593 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 8594 case 1: 8595 return PAGE_READ | PAGE_WRITE; 8596 case 2: 8597 return is_user ? 0 : PAGE_READ; 8598 case 3: 8599 return PAGE_READ; 8600 default: 8601 g_assert_not_reached(); 8602 } 8603 } 8604 8605 static inline int 8606 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 8607 { 8608 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 8609 } 8610 8611 /* Translate S2 section/page access permissions to protection flags 8612 * 8613 * @env: CPUARMState 8614 * @s2ap: The 2-bit stage2 access permissions (S2AP) 8615 * @xn: XN (execute-never) bit 8616 */ 8617 static int get_S2prot(CPUARMState *env, int s2ap, int xn) 8618 { 8619 int prot = 0; 8620 8621 if (s2ap & 1) { 8622 prot |= PAGE_READ; 8623 } 8624 if (s2ap & 2) { 8625 prot |= PAGE_WRITE; 8626 } 8627 if (!xn) { 8628 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 8629 prot |= PAGE_EXEC; 8630 } 8631 } 8632 return prot; 8633 } 8634 8635 /* Translate section/page access permissions to protection flags 8636 * 8637 * @env: CPUARMState 8638 * @mmu_idx: MMU index indicating required translation regime 8639 * @is_aa64: TRUE if AArch64 8640 * @ap: The 2-bit simple AP (AP[2:1]) 8641 * @ns: NS (non-secure) bit 8642 * @xn: XN (execute-never) bit 8643 * @pxn: PXN (privileged execute-never) bit 8644 */ 8645 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 8646 int ap, int ns, int xn, int pxn) 8647 { 8648 bool is_user = regime_is_user(env, mmu_idx); 8649 int prot_rw, user_rw; 8650 bool have_wxn; 8651 int wxn = 0; 8652 8653 assert(mmu_idx != ARMMMUIdx_S2NS); 8654 8655 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 8656 if (is_user) { 8657 prot_rw = user_rw; 8658 } else { 8659 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 8660 } 8661 8662 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { 8663 return prot_rw; 8664 } 8665 8666 /* TODO have_wxn should be replaced with 8667 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 8668 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 8669 * compatible processors have EL2, which is required for [U]WXN. 
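*
* (Effect of the WXN test at the end of this function: with SCTLR.WXN
* set, any mapping whose prot_rw includes PAGE_WRITE never gains
* PAGE_EXEC, i.e. writable memory is forced execute-never; SCTLR.UWXN
* similarly makes user-writable pages execute-never for privileged
* execution.)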
8670 */ 8671 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 8672 8673 if (have_wxn) { 8674 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 8675 } 8676 8677 if (is_aa64) { 8678 switch (regime_el(env, mmu_idx)) { 8679 case 1: 8680 if (!is_user) { 8681 xn = pxn || (user_rw & PAGE_WRITE); 8682 } 8683 break; 8684 case 2: 8685 case 3: 8686 break; 8687 } 8688 } else if (arm_feature(env, ARM_FEATURE_V7)) { 8689 switch (regime_el(env, mmu_idx)) { 8690 case 1: 8691 case 3: 8692 if (is_user) { 8693 xn = xn || !(user_rw & PAGE_READ); 8694 } else { 8695 int uwxn = 0; 8696 if (have_wxn) { 8697 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 8698 } 8699 xn = xn || !(prot_rw & PAGE_READ) || pxn || 8700 (uwxn && (user_rw & PAGE_WRITE)); 8701 } 8702 break; 8703 case 2: 8704 break; 8705 } 8706 } else { 8707 xn = wxn = 0; 8708 } 8709 8710 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 8711 return prot_rw; 8712 } 8713 return prot_rw | PAGE_EXEC; 8714 } 8715 8716 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 8717 uint32_t *table, uint32_t address) 8718 { 8719 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 8720 TCR *tcr = regime_tcr(env, mmu_idx); 8721 8722 if (address & tcr->mask) { 8723 if (tcr->raw_tcr & TTBCR_PD1) { 8724 /* Translation table walk disabled for TTBR1 */ 8725 return false; 8726 } 8727 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 8728 } else { 8729 if (tcr->raw_tcr & TTBCR_PD0) { 8730 /* Translation table walk disabled for TTBR0 */ 8731 return false; 8732 } 8733 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; 8734 } 8735 *table |= (address >> 18) & 0x3ffc; 8736 return true; 8737 } 8738 8739 /* Translate a S1 pagetable walk through S2 if needed. */ 8740 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, 8741 hwaddr addr, MemTxAttrs txattrs, 8742 ARMMMUFaultInfo *fi) 8743 { 8744 if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) && 8745 !regime_translation_disabled(env, ARMMMUIdx_S2NS)) { 8746 target_ulong s2size; 8747 hwaddr s2pa; 8748 int s2prot; 8749 int ret; 8750 8751 ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa, 8752 &txattrs, &s2prot, &s2size, fi, NULL); 8753 if (ret) { 8754 assert(fi->type != ARMFault_None); 8755 fi->s2addr = addr; 8756 fi->stage2 = true; 8757 fi->s1ptw = true; 8758 return ~0; 8759 } 8760 addr = s2pa; 8761 } 8762 return addr; 8763 } 8764 8765 /* All loads done in the course of a page table walk go through here. 
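*
* (The short-descriptor walkers get_phys_addr_v5() and get_phys_addr_v6()
* fetch 32-bit descriptors via arm_ldl_ptw(), while the LPAE walker uses
* arm_ldq_ptw() for 64-bit descriptors; both run the descriptor address
* through S1_ptw_translate() first, so a stage 1 walk is itself subject
* to stage 2 translation where applicable.)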
*/ 8766 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, 8767 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 8768 { 8769 ARMCPU *cpu = ARM_CPU(cs); 8770 CPUARMState *env = &cpu->env; 8771 MemTxAttrs attrs = {}; 8772 MemTxResult result = MEMTX_OK; 8773 AddressSpace *as; 8774 uint32_t data; 8775 8776 attrs.secure = is_secure; 8777 as = arm_addressspace(cs, attrs); 8778 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 8779 if (fi->s1ptw) { 8780 return 0; 8781 } 8782 if (regime_translation_big_endian(env, mmu_idx)) { 8783 data = address_space_ldl_be(as, addr, attrs, &result); 8784 } else { 8785 data = address_space_ldl_le(as, addr, attrs, &result); 8786 } 8787 if (result == MEMTX_OK) { 8788 return data; 8789 } 8790 fi->type = ARMFault_SyncExternalOnWalk; 8791 fi->ea = arm_extabort_type(result); 8792 return 0; 8793 } 8794 8795 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, 8796 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 8797 { 8798 ARMCPU *cpu = ARM_CPU(cs); 8799 CPUARMState *env = &cpu->env; 8800 MemTxAttrs attrs = {}; 8801 MemTxResult result = MEMTX_OK; 8802 AddressSpace *as; 8803 uint64_t data; 8804 8805 attrs.secure = is_secure; 8806 as = arm_addressspace(cs, attrs); 8807 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 8808 if (fi->s1ptw) { 8809 return 0; 8810 } 8811 if (regime_translation_big_endian(env, mmu_idx)) { 8812 data = address_space_ldq_be(as, addr, attrs, &result); 8813 } else { 8814 data = address_space_ldq_le(as, addr, attrs, &result); 8815 } 8816 if (result == MEMTX_OK) { 8817 return data; 8818 } 8819 fi->type = ARMFault_SyncExternalOnWalk; 8820 fi->ea = arm_extabort_type(result); 8821 return 0; 8822 } 8823 8824 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, 8825 MMUAccessType access_type, ARMMMUIdx mmu_idx, 8826 hwaddr *phys_ptr, int *prot, 8827 target_ulong *page_size, 8828 ARMMMUFaultInfo *fi) 8829 { 8830 CPUState *cs = CPU(arm_env_get_cpu(env)); 8831 int level = 1; 8832 uint32_t table; 8833 uint32_t desc; 8834 int type; 8835 int ap; 8836 int domain = 0; 8837 int domain_prot; 8838 hwaddr phys_addr; 8839 uint32_t dacr; 8840 8841 /* Pagetable walk. */ 8842 /* Lookup l1 descriptor. */ 8843 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 8844 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 8845 fi->type = ARMFault_Translation; 8846 goto do_fault; 8847 } 8848 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 8849 mmu_idx, fi); 8850 if (fi->type != ARMFault_None) { 8851 goto do_fault; 8852 } 8853 type = (desc & 3); 8854 domain = (desc >> 5) & 0x0f; 8855 if (regime_el(env, mmu_idx) == 1) { 8856 dacr = env->cp15.dacr_ns; 8857 } else { 8858 dacr = env->cp15.dacr_s; 8859 } 8860 domain_prot = (dacr >> (domain * 2)) & 3; 8861 if (type == 0) { 8862 /* Section translation fault. */ 8863 fi->type = ARMFault_Translation; 8864 goto do_fault; 8865 } 8866 if (type != 2) { 8867 level = 2; 8868 } 8869 if (domain_prot == 0 || domain_prot == 2) { 8870 fi->type = ARMFault_Domain; 8871 goto do_fault; 8872 } 8873 if (type == 2) { 8874 /* 1Mb section. */ 8875 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 8876 ap = (desc >> 10) & 3; 8877 *page_size = 1024 * 1024; 8878 } else { 8879 /* Lookup l2 entry. */ 8880 if (type == 1) { 8881 /* Coarse pagetable. */ 8882 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 8883 } else { 8884 /* Fine pagetable. 
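*
* (A fine second-level table has 1024 4-byte entries, one per 1KB of
* the 1MB section: the index taken below is address bits [19:10],
* already scaled to a byte offset by the 0xffc mask.)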
*/ 8885 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 8886 } 8887 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 8888 mmu_idx, fi); 8889 if (fi->type != ARMFault_None) { 8890 goto do_fault; 8891 } 8892 switch (desc & 3) { 8893 case 0: /* Page translation fault. */ 8894 fi->type = ARMFault_Translation; 8895 goto do_fault; 8896 case 1: /* 64k page. */ 8897 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 8898 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 8899 *page_size = 0x10000; 8900 break; 8901 case 2: /* 4k page. */ 8902 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 8903 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 8904 *page_size = 0x1000; 8905 break; 8906 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 8907 if (type == 1) { 8908 /* ARMv6/XScale extended small page format */ 8909 if (arm_feature(env, ARM_FEATURE_XSCALE) 8910 || arm_feature(env, ARM_FEATURE_V6)) { 8911 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 8912 *page_size = 0x1000; 8913 } else { 8914 /* UNPREDICTABLE in ARMv5; we choose to take a 8915 * page translation fault. 8916 */ 8917 fi->type = ARMFault_Translation; 8918 goto do_fault; 8919 } 8920 } else { 8921 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 8922 *page_size = 0x400; 8923 } 8924 ap = (desc >> 4) & 3; 8925 break; 8926 default: 8927 /* Never happens, but compiler isn't smart enough to tell. */ 8928 abort(); 8929 } 8930 } 8931 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 8932 *prot |= *prot ? PAGE_EXEC : 0; 8933 if (!(*prot & (1 << access_type))) { 8934 /* Access permission fault. */ 8935 fi->type = ARMFault_Permission; 8936 goto do_fault; 8937 } 8938 *phys_ptr = phys_addr; 8939 return false; 8940 do_fault: 8941 fi->domain = domain; 8942 fi->level = level; 8943 return true; 8944 } 8945 8946 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, 8947 MMUAccessType access_type, ARMMMUIdx mmu_idx, 8948 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 8949 target_ulong *page_size, ARMMMUFaultInfo *fi) 8950 { 8951 CPUState *cs = CPU(arm_env_get_cpu(env)); 8952 int level = 1; 8953 uint32_t table; 8954 uint32_t desc; 8955 uint32_t xn; 8956 uint32_t pxn = 0; 8957 int type; 8958 int ap; 8959 int domain = 0; 8960 int domain_prot; 8961 hwaddr phys_addr; 8962 uint32_t dacr; 8963 bool ns; 8964 8965 /* Pagetable walk. */ 8966 /* Lookup l1 descriptor. */ 8967 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 8968 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 8969 fi->type = ARMFault_Translation; 8970 goto do_fault; 8971 } 8972 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 8973 mmu_idx, fi); 8974 if (fi->type != ARMFault_None) { 8975 goto do_fault; 8976 } 8977 type = (desc & 3); 8978 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { 8979 /* Section translation fault, or attempt to use the encoding 8980 * which is Reserved on implementations without PXN. 8981 */ 8982 fi->type = ARMFault_Translation; 8983 goto do_fault; 8984 } 8985 if ((type == 1) || !(desc & (1 << 18))) { 8986 /* Page or Section. 
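*
* (Bit 18 of a non-page-table descriptor distinguishes a 16MB
* supersection from a 1MB section; supersections carry no domain
* field, which is why the domain is only extracted here for page
* tables and ordinary sections.)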
*/ 8987 domain = (desc >> 5) & 0x0f; 8988 } 8989 if (regime_el(env, mmu_idx) == 1) { 8990 dacr = env->cp15.dacr_ns; 8991 } else { 8992 dacr = env->cp15.dacr_s; 8993 } 8994 if (type == 1) { 8995 level = 2; 8996 } 8997 domain_prot = (dacr >> (domain * 2)) & 3; 8998 if (domain_prot == 0 || domain_prot == 2) { 8999 /* Section or Page domain fault */ 9000 fi->type = ARMFault_Domain; 9001 goto do_fault; 9002 } 9003 if (type != 1) { 9004 if (desc & (1 << 18)) { 9005 /* Supersection. */ 9006 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 9007 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 9008 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 9009 *page_size = 0x1000000; 9010 } else { 9011 /* Section. */ 9012 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 9013 *page_size = 0x100000; 9014 } 9015 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 9016 xn = desc & (1 << 4); 9017 pxn = desc & 1; 9018 ns = extract32(desc, 19, 1); 9019 } else { 9020 if (arm_feature(env, ARM_FEATURE_PXN)) { 9021 pxn = (desc >> 2) & 1; 9022 } 9023 ns = extract32(desc, 3, 1); 9024 /* Lookup l2 entry. */ 9025 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 9026 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 9027 mmu_idx, fi); 9028 if (fi->type != ARMFault_None) { 9029 goto do_fault; 9030 } 9031 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 9032 switch (desc & 3) { 9033 case 0: /* Page translation fault. */ 9034 fi->type = ARMFault_Translation; 9035 goto do_fault; 9036 case 1: /* 64k page. */ 9037 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 9038 xn = desc & (1 << 15); 9039 *page_size = 0x10000; 9040 break; 9041 case 2: case 3: /* 4k page. */ 9042 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 9043 xn = desc & 1; 9044 *page_size = 0x1000; 9045 break; 9046 default: 9047 /* Never happens, but compiler isn't smart enough to tell. */ 9048 abort(); 9049 } 9050 } 9051 if (domain_prot == 3) { 9052 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 9053 } else { 9054 if (pxn && !regime_is_user(env, mmu_idx)) { 9055 xn = 1; 9056 } 9057 if (xn && access_type == MMU_INST_FETCH) { 9058 fi->type = ARMFault_Permission; 9059 goto do_fault; 9060 } 9061 9062 if (arm_feature(env, ARM_FEATURE_V6K) && 9063 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 9064 /* The simplified model uses AP[0] as an access control bit. */ 9065 if ((ap & 1) == 0) { 9066 /* Access flag fault. */ 9067 fi->type = ARMFault_AccessFlag; 9068 goto do_fault; 9069 } 9070 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 9071 } else { 9072 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 9073 } 9074 if (*prot && !xn) { 9075 *prot |= PAGE_EXEC; 9076 } 9077 if (!(*prot & (1 << access_type))) { 9078 /* Access permission fault. */ 9079 fi->type = ARMFault_Permission; 9080 goto do_fault; 9081 } 9082 } 9083 if (ns) { 9084 /* The NS bit will (as required by the architecture) have no effect if 9085 * the CPU doesn't support TZ or this is a non-secure translation 9086 * regime, because the attribute will already be non-secure. 
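*
* (Concretely: a walk from a Secure regime that finds NS == 1 in the
* descriptor downgrades the resulting access to non-secure, while the
* reverse direction, upgrading a non-secure access to secure, is
* architecturally impossible.)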
9087 */
9088 attrs->secure = false;
9089 }
9090 *phys_ptr = phys_addr;
9091 return false;
9092 do_fault:
9093 fi->domain = domain;
9094 fi->level = level;
9095 return true;
9096 }
9097
9098 /*
9099 * check_s2_mmu_setup
9100 * @cpu: ARMCPU
9101 * @is_aa64: True if the translation regime is in AArch64 state
9102 * @level: Suggested starting level
9103 * @inputsize: Bitsize of IPAs
9104 * @stride: Page-table stride (See the ARM ARM)
9105 *
9106 * Returns true if the suggested S2 translation parameters are OK and
9107 * false otherwise.
9108 */
9109 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
9110 int inputsize, int stride)
9111 {
9112 const int grainsize = stride + 3;
9113 int startsizecheck;
9114
9115 /* Negative levels are never allowed. */
9116 if (level < 0) {
9117 return false;
9118 }
9119
9120 startsizecheck = inputsize - ((3 - level) * stride + grainsize);
9121 if (startsizecheck < 1 || startsizecheck > stride + 4) {
9122 return false;
9123 }
9124
9125 if (is_aa64) {
9126 CPUARMState *env = &cpu->env;
9127 unsigned int pamax = arm_pamax(cpu);
9128
9129 switch (stride) {
9130 case 13: /* 64KB Pages. */
9131 if (level == 0 || (level == 1 && pamax <= 42)) {
9132 return false;
9133 }
9134 break;
9135 case 11: /* 16KB Pages. */
9136 if (level == 0 || (level == 1 && pamax <= 40)) {
9137 return false;
9138 }
9139 break;
9140 case 9: /* 4KB Pages. */
9141 if (level == 0 && pamax <= 42) {
9142 return false;
9143 }
9144 break;
9145 default:
9146 g_assert_not_reached();
9147 }
9148
9149 /* Inputsize checks. */
9150 if (inputsize > pamax &&
9151 (arm_el_is_aa64(env, 1) || inputsize > 40)) {
9152 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
9153 return false;
9154 }
9155 } else {
9156 /* AArch32 only supports 4KB pages. Assert on that. */
9157 assert(stride == 9);
9158
9159 if (level == 0) {
9160 return false;
9161 }
9162 }
9163 return true;
9164 }
9165
9166 /* Translate from the 4-bit stage 2 representation of
9167 * memory attributes (without cache-allocation hints) to
9168 * the 8-bit representation of the stage 1 MAIR registers
9169 * (which includes allocation hints).
9170 *
9171 * ref: shared/translation/attrs/S2AttrDecode()
9172 * .../S2ConvertAttrsHints()
9173 */
9174 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
9175 {
9176 uint8_t hiattr = extract32(s2attrs, 2, 2);
9177 uint8_t loattr = extract32(s2attrs, 0, 2);
9178 uint8_t hihint = 0, lohint = 0;
9179
9180 if (hiattr != 0) { /* normal memory */
9181 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
9182 hiattr = loattr = 1; /* non-cacheable */
9183 } else {
9184 if (hiattr != 1) { /* Write-through or write-back */
9185 hihint = 3; /* RW allocate */
9186 }
9187 if (loattr != 1) { /* Write-through or write-back */
9188 lohint = 3; /* RW allocate */
9189 }
9190 }
9191 }
9192
9193 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
9194 }
9195
9196 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
9197 MMUAccessType access_type, ARMMMUIdx mmu_idx,
9198 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
9199 target_ulong *page_size_ptr,
9200 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
9201 {
9202 ARMCPU *cpu = arm_env_get_cpu(env);
9203 CPUState *cs = CPU(cpu);
9204 /* Read an LPAE long-descriptor translation table.
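*
* (Worked example of the geometry handled below: a 4KB granule with
* T0SZ == 16 gives inputsize == 48 and stride == 9, so the walk starts
* at level 0 and consumes address bits [47:39], [38:30], [29:21] and
* [20:12] at successive levels, leaving bits [11:0] as the page offset.)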
*/ 9205 ARMFaultType fault_type = ARMFault_Translation; 9206 uint32_t level; 9207 uint32_t epd = 0; 9208 int32_t t0sz, t1sz; 9209 uint32_t tg; 9210 uint64_t ttbr; 9211 int ttbr_select; 9212 hwaddr descaddr, indexmask, indexmask_grainsize; 9213 uint32_t tableattrs; 9214 target_ulong page_size; 9215 uint32_t attrs; 9216 int32_t stride = 9; 9217 int32_t addrsize; 9218 int inputsize; 9219 int32_t tbi = 0; 9220 TCR *tcr = regime_tcr(env, mmu_idx); 9221 int ap, ns, xn, pxn; 9222 uint32_t el = regime_el(env, mmu_idx); 9223 bool ttbr1_valid = true; 9224 uint64_t descaddrmask; 9225 bool aarch64 = arm_el_is_aa64(env, el); 9226 9227 /* TODO: 9228 * This code does not handle the different format TCR for VTCR_EL2. 9229 * This code also does not support shareability levels. 9230 * Attribute and permission bit handling should also be checked when adding 9231 * support for those page table walks. 9232 */ 9233 if (aarch64) { 9234 level = 0; 9235 addrsize = 64; 9236 if (el > 1) { 9237 if (mmu_idx != ARMMMUIdx_S2NS) { 9238 tbi = extract64(tcr->raw_tcr, 20, 1); 9239 } 9240 } else { 9241 if (extract64(address, 55, 1)) { 9242 tbi = extract64(tcr->raw_tcr, 38, 1); 9243 } else { 9244 tbi = extract64(tcr->raw_tcr, 37, 1); 9245 } 9246 } 9247 tbi *= 8; 9248 9249 /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it 9250 * invalid. 9251 */ 9252 if (el > 1) { 9253 ttbr1_valid = false; 9254 } 9255 } else { 9256 level = 1; 9257 addrsize = 32; 9258 /* There is no TTBR1 for EL2 */ 9259 if (el == 2) { 9260 ttbr1_valid = false; 9261 } 9262 } 9263 9264 /* Determine whether this address is in the region controlled by 9265 * TTBR0 or TTBR1 (or if it is in neither region and should fault). 9266 * This is a Non-secure PL0/1 stage 1 translation, so controlled by 9267 * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32: 9268 */ 9269 if (aarch64) { 9270 /* AArch64 translation. */ 9271 t0sz = extract32(tcr->raw_tcr, 0, 6); 9272 t0sz = MIN(t0sz, 39); 9273 t0sz = MAX(t0sz, 16); 9274 } else if (mmu_idx != ARMMMUIdx_S2NS) { 9275 /* AArch32 stage 1 translation. */ 9276 t0sz = extract32(tcr->raw_tcr, 0, 3); 9277 } else { 9278 /* AArch32 stage 2 translation. */ 9279 bool sext = extract32(tcr->raw_tcr, 4, 1); 9280 bool sign = extract32(tcr->raw_tcr, 3, 1); 9281 /* Address size is 40-bit for a stage 2 translation, 9282 * and t0sz can be negative (from -8 to 7), 9283 * so we need to adjust it to use the TTBR selecting logic below. 9284 */ 9285 addrsize = 40; 9286 t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8; 9287 9288 /* If the sign-extend bit is not the same as t0sz[3], the result 9289 * is unpredictable. Flag this as a guest error. 
*/ 9290 if (sign != sext) { 9291 qemu_log_mask(LOG_GUEST_ERROR, 9292 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 9293 } 9294 } 9295 t1sz = extract32(tcr->raw_tcr, 16, 6); 9296 if (aarch64) { 9297 t1sz = MIN(t1sz, 39); 9298 t1sz = MAX(t1sz, 16); 9299 } 9300 if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) { 9301 /* there is a ttbr0 region and we are in it (high bits all zero) */ 9302 ttbr_select = 0; 9303 } else if (ttbr1_valid && t1sz && 9304 !extract64(~address, addrsize - t1sz, t1sz - tbi)) { 9305 /* there is a ttbr1 region and we are in it (high bits all one) */ 9306 ttbr_select = 1; 9307 } else if (!t0sz) { 9308 /* ttbr0 region is "everything not in the ttbr1 region" */ 9309 ttbr_select = 0; 9310 } else if (!t1sz && ttbr1_valid) { 9311 /* ttbr1 region is "everything not in the ttbr0 region" */ 9312 ttbr_select = 1; 9313 } else { 9314 /* in the gap between the two regions, this is a Translation fault */ 9315 fault_type = ARMFault_Translation; 9316 goto do_fault; 9317 } 9318 9319 /* Note that QEMU ignores shareability and cacheability attributes, 9320 * so we don't need to do anything with the SH, ORGN, IRGN fields 9321 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 9322 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 9323 * implement any ASID-like capability so we can ignore it (instead 9324 * we will always flush the TLB any time the ASID is changed). 9325 */ 9326 if (ttbr_select == 0) { 9327 ttbr = regime_ttbr(env, mmu_idx, 0); 9328 if (el < 2) { 9329 epd = extract32(tcr->raw_tcr, 7, 1); 9330 } 9331 inputsize = addrsize - t0sz; 9332 9333 tg = extract32(tcr->raw_tcr, 14, 2); 9334 if (tg == 1) { /* 64KB pages */ 9335 stride = 13; 9336 } 9337 if (tg == 2) { /* 16KB pages */ 9338 stride = 11; 9339 } 9340 } else { 9341 /* We should only be here if TTBR1 is valid */ 9342 assert(ttbr1_valid); 9343 9344 ttbr = regime_ttbr(env, mmu_idx, 1); 9345 epd = extract32(tcr->raw_tcr, 23, 1); 9346 inputsize = addrsize - t1sz; 9347 9348 tg = extract32(tcr->raw_tcr, 30, 2); 9349 if (tg == 3) { /* 64KB pages */ 9350 stride = 13; 9351 } 9352 if (tg == 1) { /* 16KB pages */ 9353 stride = 11; 9354 } 9355 } 9356 9357 /* Here we should have set up all the parameters for the translation: 9358 * inputsize, ttbr, epd, stride, tbi 9359 */ 9360 9361 if (epd) { 9362 /* Translation table walk disabled => Translation fault on TLB miss 9363 * Note: This is always 0 on 64-bit EL2 and EL3. 9364 */ 9365 goto do_fault; 9366 } 9367 9368 if (mmu_idx != ARMMMUIdx_S2NS) { 9369 /* The starting level depends on the virtual address size (which can 9370 * be up to 48 bits) and the translation granule size. It indicates 9371 * the number of strides (stride bits at a time) needed to 9372 * consume the bits of the input address. In the pseudocode this is: 9373 * level = 4 - RoundUp((inputsize - grainsize) / stride) 9374 * where their 'inputsize' is our 'inputsize', 'grainsize' is 9375 * our 'stride + 3' and 'stride' is our 'stride'. 
9376 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 9377 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 9378 * = 4 - (inputsize - 4) / stride; 9379 */ 9380 level = 4 - (inputsize - 4) / stride; 9381 } else { 9382 /* For stage 2 translations the starting level is specified by the 9383 * VTCR_EL2.SL0 field (whose interpretation depends on the page size) 9384 */ 9385 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); 9386 uint32_t startlevel; 9387 bool ok; 9388 9389 if (!aarch64 || stride == 9) { 9390 /* AArch32 or 4KB pages */ 9391 startlevel = 2 - sl0; 9392 } else { 9393 /* 16KB or 64KB pages */ 9394 startlevel = 3 - sl0; 9395 } 9396 9397 /* Check that the starting level is valid. */ 9398 ok = check_s2_mmu_setup(cpu, aarch64, startlevel, 9399 inputsize, stride); 9400 if (!ok) { 9401 fault_type = ARMFault_Translation; 9402 goto do_fault; 9403 } 9404 level = startlevel; 9405 } 9406 9407 indexmask_grainsize = (1ULL << (stride + 3)) - 1; 9408 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; 9409 9410 /* Now we can extract the actual base address from the TTBR */ 9411 descaddr = extract64(ttbr, 0, 48); 9412 descaddr &= ~indexmask; 9413 9414 /* The address field in the descriptor goes up to bit 39 for ARMv7 9415 * but up to bit 47 for ARMv8, but we use the descaddrmask 9416 * up to bit 39 for AArch32, because we don't need other bits in that case 9417 * to construct next descriptor address (anyway they should be all zeroes). 9418 */ 9419 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & 9420 ~indexmask_grainsize; 9421 9422 /* Secure accesses start with the page table in secure memory and 9423 * can be downgraded to non-secure at any step. Non-secure accesses 9424 * remain non-secure. We implement this by just ORing in the NSTable/NS 9425 * bits at each step. 9426 */ 9427 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); 9428 for (;;) { 9429 uint64_t descriptor; 9430 bool nstable; 9431 9432 descaddr |= (address >> (stride * (4 - level))) & indexmask; 9433 descaddr &= ~7ULL; 9434 nstable = extract32(tableattrs, 4, 1); 9435 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); 9436 if (fi->type != ARMFault_None) { 9437 goto do_fault; 9438 } 9439 9440 if (!(descriptor & 1) || 9441 (!(descriptor & 2) && (level == 3))) { 9442 /* Invalid, or the Reserved level 3 encoding */ 9443 goto do_fault; 9444 } 9445 descaddr = descriptor & descaddrmask; 9446 9447 if ((descriptor & 2) && (level < 3)) { 9448 /* Table entry. The top five bits are attributes which may 9449 * propagate down through lower levels of the table (and 9450 * which are all arranged so that 0 means "no effect", so 9451 * we can gather them up by ORing in the bits at each level). 9452 */ 9453 tableattrs |= extract64(descriptor, 59, 5); 9454 level++; 9455 indexmask = indexmask_grainsize; 9456 continue; 9457 } 9458 /* Block entry at level 1 or 2, or page entry at level 3. 9459 * These are basically the same thing, although the number 9460 * of bits we pull in from the vaddr varies. 
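*
* (e.g. with a 4KB granule (stride == 9) a level-2 block maps
* 1 << (9 * (4 - 2) + 3) bytes == 2MB and a level-3 page maps
* 1 << (9 * 1 + 3) bytes == 4KB, matching the page_size computation
* just below.)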
9461 */ 9462 page_size = (1ULL << ((stride * (4 - level)) + 3)); 9463 descaddr |= (address & (page_size - 1)); 9464 /* Extract attributes from the descriptor */ 9465 attrs = extract64(descriptor, 2, 10) 9466 | (extract64(descriptor, 52, 12) << 10); 9467 9468 if (mmu_idx == ARMMMUIdx_S2NS) { 9469 /* Stage 2 table descriptors do not include any attribute fields */ 9470 break; 9471 } 9472 /* Merge in attributes from table descriptors */ 9473 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ 9474 attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */ 9475 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 9476 * means "force PL1 access only", which means forcing AP[1] to 0. 9477 */ 9478 if (extract32(tableattrs, 2, 1)) { 9479 attrs &= ~(1 << 4); 9480 } 9481 attrs |= nstable << 3; /* NS */ 9482 break; 9483 } 9484 /* Here descaddr is the final physical address, and attributes 9485 * are all in attrs. 9486 */ 9487 fault_type = ARMFault_AccessFlag; 9488 if ((attrs & (1 << 8)) == 0) { 9489 /* Access flag */ 9490 goto do_fault; 9491 } 9492 9493 ap = extract32(attrs, 4, 2); 9494 xn = extract32(attrs, 12, 1); 9495 9496 if (mmu_idx == ARMMMUIdx_S2NS) { 9497 ns = true; 9498 *prot = get_S2prot(env, ap, xn); 9499 } else { 9500 ns = extract32(attrs, 3, 1); 9501 pxn = extract32(attrs, 11, 1); 9502 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); 9503 } 9504 9505 fault_type = ARMFault_Permission; 9506 if (!(*prot & (1 << access_type))) { 9507 goto do_fault; 9508 } 9509 9510 if (ns) { 9511 /* The NS bit will (as required by the architecture) have no effect if 9512 * the CPU doesn't support TZ or this is a non-secure translation 9513 * regime, because the attribute will already be non-secure. 9514 */ 9515 txattrs->secure = false; 9516 } 9517 9518 if (cacheattrs != NULL) { 9519 if (mmu_idx == ARMMMUIdx_S2NS) { 9520 cacheattrs->attrs = convert_stage2_attrs(env, 9521 extract32(attrs, 0, 4)); 9522 } else { 9523 /* Index into MAIR registers for cache attributes */ 9524 uint8_t attrindx = extract32(attrs, 0, 3); 9525 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 9526 assert(attrindx <= 7); 9527 cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 9528 } 9529 cacheattrs->shareability = extract32(attrs, 6, 2); 9530 } 9531 9532 *phys_ptr = descaddr; 9533 *page_size_ptr = page_size; 9534 return false; 9535 9536 do_fault: 9537 fi->type = fault_type; 9538 fi->level = level; 9539 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ 9540 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS); 9541 return true; 9542 } 9543 9544 static inline void get_phys_addr_pmsav7_default(CPUARMState *env, 9545 ARMMMUIdx mmu_idx, 9546 int32_t address, int *prot) 9547 { 9548 if (!arm_feature(env, ARM_FEATURE_M)) { 9549 *prot = PAGE_READ | PAGE_WRITE; 9550 switch (address) { 9551 case 0xF0000000 ... 0xFFFFFFFF: 9552 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 9553 /* hivecs execing is ok */ 9554 *prot |= PAGE_EXEC; 9555 } 9556 break; 9557 case 0x00000000 ... 0x7FFFFFFF: 9558 *prot |= PAGE_EXEC; 9559 break; 9560 } 9561 } else { 9562 /* Default system address map for M profile cores. 9563 * The architecture specifies which regions are execute-never; 9564 * at the MPU level no other checks are defined. 9565 */ 9566 switch (address) { 9567 case 0x00000000 ... 0x1fffffff: /* ROM */ 9568 case 0x20000000 ... 0x3fffffff: /* SRAM */ 9569 case 0x60000000 ... 0x7fffffff: /* RAM */ 9570 case 0x80000000 ... 
0x9fffffff: /* RAM */ 9571 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 9572 break; 9573 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 9574 case 0xa0000000 ... 0xbfffffff: /* Device */ 9575 case 0xc0000000 ... 0xdfffffff: /* Device */ 9576 case 0xe0000000 ... 0xffffffff: /* System */ 9577 *prot = PAGE_READ | PAGE_WRITE; 9578 break; 9579 default: 9580 g_assert_not_reached(); 9581 } 9582 } 9583 } 9584 9585 static bool pmsav7_use_background_region(ARMCPU *cpu, 9586 ARMMMUIdx mmu_idx, bool is_user) 9587 { 9588 /* Return true if we should use the default memory map as a 9589 * "background" region if there are no hits against any MPU regions. 9590 */ 9591 CPUARMState *env = &cpu->env; 9592 9593 if (is_user) { 9594 return false; 9595 } 9596 9597 if (arm_feature(env, ARM_FEATURE_M)) { 9598 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] 9599 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 9600 } else { 9601 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 9602 } 9603 } 9604 9605 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) 9606 { 9607 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 9608 return arm_feature(env, ARM_FEATURE_M) && 9609 extract32(address, 20, 12) == 0xe00; 9610 } 9611 9612 static inline bool m_is_system_region(CPUARMState *env, uint32_t address) 9613 { 9614 /* True if address is in the M profile system region 9615 * 0xe0000000 - 0xffffffff 9616 */ 9617 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 9618 } 9619 9620 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, 9621 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9622 hwaddr *phys_ptr, int *prot, 9623 target_ulong *page_size, 9624 ARMMMUFaultInfo *fi) 9625 { 9626 ARMCPU *cpu = arm_env_get_cpu(env); 9627 int n; 9628 bool is_user = regime_is_user(env, mmu_idx); 9629 9630 *phys_ptr = address; 9631 *page_size = TARGET_PAGE_SIZE; 9632 *prot = 0; 9633 9634 if (regime_translation_disabled(env, mmu_idx) || 9635 m_is_ppb_region(env, address)) { 9636 /* MPU disabled or M profile PPB access: use default memory map. 9637 * The other case which uses the default memory map in the 9638 * v7M ARM ARM pseudocode is exception vector reads from the vector 9639 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 9640 * which always does a direct read using address_space_ldl(), rather 9641 * than going via this function, so we don't need to check that here. 9642 */ 9643 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 9644 } else { /* MPU enabled */ 9645 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 9646 /* region search */ 9647 uint32_t base = env->pmsav7.drbar[n]; 9648 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 9649 uint32_t rmask; 9650 bool srdis = false; 9651 9652 if (!(env->pmsav7.drsr[n] & 0x1)) { 9653 continue; 9654 } 9655 9656 if (!rsize) { 9657 qemu_log_mask(LOG_GUEST_ERROR, 9658 "DRSR[%d]: Rsize field cannot be 0\n", n); 9659 continue; 9660 } 9661 rsize++; 9662 rmask = (1ull << rsize) - 1; 9663 9664 if (base & rmask) { 9665 qemu_log_mask(LOG_GUEST_ERROR, 9666 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 9667 "to DRSR region size, mask = 0x%" PRIx32 "\n", 9668 n, base, rmask); 9669 continue; 9670 } 9671 9672 if (address < base || address > base + rmask) { 9673 /* 9674 * Address not in this region. We must check whether the 9675 * region covers addresses in the same page as our address. 
9676 * In that case we must not report a size that covers the 9677 * whole page for a subsequent hit against a different MPU 9678 * region or the background region, because it would result in 9679 * incorrect TLB hits for subsequent accesses to addresses that 9680 * are in this MPU region. 9681 */ 9682 if (ranges_overlap(base, rmask, 9683 address & TARGET_PAGE_MASK, 9684 TARGET_PAGE_SIZE)) { 9685 *page_size = 1; 9686 } 9687 continue; 9688 } 9689 9690 /* Region matched */ 9691 9692 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 9693 int i, snd; 9694 uint32_t srdis_mask; 9695 9696 rsize -= 3; /* sub region size (power of 2) */ 9697 snd = ((address - base) >> rsize) & 0x7; 9698 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 9699 9700 srdis_mask = srdis ? 0x3 : 0x0; 9701 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 9702 /* This will check in groups of 2, 4 and then 8, whether 9703 * the subregion bits are consistent. rsize is incremented 9704 * back up to give the region size, considering consistent 9705 * adjacent subregions as one region. Stop testing if rsize 9706 * is already big enough for an entire QEMU page. 9707 */ 9708 int snd_rounded = snd & ~(i - 1); 9709 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 9710 snd_rounded + 8, i); 9711 if (srdis_mask ^ srdis_multi) { 9712 break; 9713 } 9714 srdis_mask = (srdis_mask << i) | srdis_mask; 9715 rsize++; 9716 } 9717 } 9718 if (srdis) { 9719 continue; 9720 } 9721 if (rsize < TARGET_PAGE_BITS) { 9722 *page_size = 1 << rsize; 9723 } 9724 break; 9725 } 9726 9727 if (n == -1) { /* no hits */ 9728 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 9729 /* background fault */ 9730 fi->type = ARMFault_Background; 9731 return true; 9732 } 9733 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 9734 } else { /* a MPU hit! */ 9735 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 9736 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 9737 9738 if (m_is_system_region(env, address)) { 9739 /* System space is always execute never */ 9740 xn = 1; 9741 } 9742 9743 if (is_user) { /* User mode AP bit decoding */ 9744 switch (ap) { 9745 case 0: 9746 case 1: 9747 case 5: 9748 break; /* no access */ 9749 case 3: 9750 *prot |= PAGE_WRITE; 9751 /* fall through */ 9752 case 2: 9753 case 6: 9754 *prot |= PAGE_READ | PAGE_EXEC; 9755 break; 9756 case 7: 9757 /* for v7M, same as 6; for R profile a reserved value */ 9758 if (arm_feature(env, ARM_FEATURE_M)) { 9759 *prot |= PAGE_READ | PAGE_EXEC; 9760 break; 9761 } 9762 /* fall through */ 9763 default: 9764 qemu_log_mask(LOG_GUEST_ERROR, 9765 "DRACR[%d]: Bad value for AP bits: 0x%" 9766 PRIx32 "\n", n, ap); 9767 } 9768 } else { /* Priv. mode AP bits decoding */ 9769 switch (ap) { 9770 case 0: 9771 break; /* no access */ 9772 case 1: 9773 case 2: 9774 case 3: 9775 *prot |= PAGE_WRITE; 9776 /* fall through */ 9777 case 5: 9778 case 6: 9779 *prot |= PAGE_READ | PAGE_EXEC; 9780 break; 9781 case 7: 9782 /* for v7M, same as 6; for R profile a reserved value */ 9783 if (arm_feature(env, ARM_FEATURE_M)) { 9784 *prot |= PAGE_READ | PAGE_EXEC; 9785 break; 9786 } 9787 /* fall through */ 9788 default: 9789 qemu_log_mask(LOG_GUEST_ERROR, 9790 "DRACR[%d]: Bad value for AP bits: 0x%" 9791 PRIx32 "\n", n, ap); 9792 } 9793 } 9794 9795 /* execute never */ 9796 if (xn) { 9797 *prot &= ~PAGE_EXEC; 9798 } 9799 } 9800 } 9801 9802 fi->type = ARMFault_Permission; 9803 fi->level = 1; 9804 /* 9805 * Core QEMU code can't handle execution from small pages yet, so 9806 * don't try it. 
This way we'll get an MPU exception, rather than 9807 * eventually causing QEMU to exit in get_page_addr_code(). 9808 */ 9809 if (*page_size < TARGET_PAGE_SIZE && (*prot & PAGE_EXEC)) { 9810 qemu_log_mask(LOG_UNIMP, 9811 "MPU: No support for execution from regions " 9812 "smaller than 1K\n"); 9813 *prot &= ~PAGE_EXEC; 9814 } 9815 return !(*prot & (1 << access_type)); 9816 } 9817 9818 static bool v8m_is_sau_exempt(CPUARMState *env, 9819 uint32_t address, MMUAccessType access_type) 9820 { 9821 /* The architecture specifies that certain address ranges are 9822 * exempt from v8M SAU/IDAU checks. 9823 */ 9824 return 9825 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 9826 (address >= 0xe0000000 && address <= 0xe0002fff) || 9827 (address >= 0xe000e000 && address <= 0xe000efff) || 9828 (address >= 0xe002e000 && address <= 0xe002efff) || 9829 (address >= 0xe0040000 && address <= 0xe0041fff) || 9830 (address >= 0xe00ff000 && address <= 0xe00fffff); 9831 } 9832 9833 static void v8m_security_lookup(CPUARMState *env, uint32_t address, 9834 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9835 V8M_SAttributes *sattrs) 9836 { 9837 /* Look up the security attributes for this address. Compare the 9838 * pseudocode SecurityCheck() function. 9839 * We assume the caller has zero-initialized *sattrs. 9840 */ 9841 ARMCPU *cpu = arm_env_get_cpu(env); 9842 int r; 9843 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 9844 int idau_region = IREGION_NOTVALID; 9845 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 9846 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 9847 9848 if (cpu->idau) { 9849 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 9850 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 9851 9852 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 9853 &idau_nsc); 9854 } 9855 9856 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 9857 /* 0xf0000000..0xffffffff is always S for insn fetches */ 9858 return; 9859 } 9860 9861 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 9862 sattrs->ns = !regime_is_secure(env, mmu_idx); 9863 return; 9864 } 9865 9866 if (idau_region != IREGION_NOTVALID) { 9867 sattrs->irvalid = true; 9868 sattrs->iregion = idau_region; 9869 } 9870 9871 switch (env->sau.ctrl & 3) { 9872 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 9873 break; 9874 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 9875 sattrs->ns = true; 9876 break; 9877 default: /* SAU.ENABLE == 1 */ 9878 for (r = 0; r < cpu->sau_sregion; r++) { 9879 if (env->sau.rlar[r] & 1) { 9880 uint32_t base = env->sau.rbar[r] & ~0x1f; 9881 uint32_t limit = env->sau.rlar[r] | 0x1f; 9882 9883 if (base <= address && limit >= address) { 9884 if (base > addr_page_base || limit < addr_page_limit) { 9885 sattrs->subpage = true; 9886 } 9887 if (sattrs->srvalid) { 9888 /* If we hit in more than one region then we must report 9889 * as Secure, not NS-Callable, with no valid region 9890 * number info. 9891 */ 9892 sattrs->ns = false; 9893 sattrs->nsc = false; 9894 sattrs->sregion = 0; 9895 sattrs->srvalid = false; 9896 break; 9897 } else { 9898 if (env->sau.rlar[r] & 2) { 9899 sattrs->nsc = true; 9900 } else { 9901 sattrs->ns = true; 9902 } 9903 sattrs->srvalid = true; 9904 sattrs->sregion = r; 9905 } 9906 } else { 9907 /* 9908 * Address not in this region. We must check whether the 9909 * region covers addresses in the same page as our address. 
9910 * In that case we must not report a size that covers the
9911 * whole page for a subsequent hit against a different SAU
9912 * region or the background region, because it would result
9913 * in incorrect TLB hits for subsequent accesses to
9914 * addresses that are in this SAU region.
9915 */
9916 if (limit >= base &&
9917 ranges_overlap(base, limit - base + 1,
9918 addr_page_base,
9919 TARGET_PAGE_SIZE)) {
9920 sattrs->subpage = true;
9921 }
9922 }
9923 }
9924 }
9925
9926 /* The IDAU will override the SAU lookup results if it specifies
9927 * higher security than the SAU does.
9928 */
9929 if (!idau_ns) {
9930 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
9931 sattrs->ns = false;
9932 sattrs->nsc = idau_nsc;
9933 }
9934 }
9935 break;
9936 }
9937 }
9938
9939 static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
9940 MMUAccessType access_type, ARMMMUIdx mmu_idx,
9941 hwaddr *phys_ptr, MemTxAttrs *txattrs,
9942 int *prot, bool *is_subpage,
9943 ARMMMUFaultInfo *fi, uint32_t *mregion)
9944 {
9945 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
9946 * that a full phys-to-virt translation does).
9947 * mregion is (if not NULL) set to the region number which matched,
9948 * or -1 if no region number is returned (MPU off, address did not
9949 * hit a region, address hit in multiple regions).
9950 * We set is_subpage to true if the region hit doesn't cover the
9951 * entire TARGET_PAGE the address is within.
9952 */
9953 ARMCPU *cpu = arm_env_get_cpu(env);
9954 bool is_user = regime_is_user(env, mmu_idx);
9955 uint32_t secure = regime_is_secure(env, mmu_idx);
9956 int n;
9957 int matchregion = -1;
9958 bool hit = false;
9959 uint32_t addr_page_base = address & TARGET_PAGE_MASK;
9960 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
9961
9962 *is_subpage = false;
9963 *phys_ptr = address;
9964 *prot = 0;
9965 if (mregion) {
9966 *mregion = -1;
9967 }
9968
9969 /* Unlike the ARM ARM pseudocode, we don't need to check whether this
9970 * was an exception vector read from the vector table (which is always
9971 * done using the default system address map), because those accesses
9972 * are done in arm_v7m_load_vector(), which always does a direct
9973 * read using address_space_ldl(), rather than going via this function.
9974 */
9975 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
9976 hit = true;
9977 } else if (m_is_ppb_region(env, address)) {
9978 hit = true;
9979 } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
9980 hit = true;
9981 } else {
9982 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
9983 /* region search */
9984 /* Note that the base address is bits [31:5] from the register
9985 * with bits [4:0] all zeroes, but the limit address is bits
9986 * [31:5] from the register with bits [4:0] all ones.
9987 */
9988 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
9989 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
9990
9991 if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
9992 /* Region disabled */
9993 continue;
9994 }
9995
9996 if (address < base || address > limit) {
9997 /*
9998 * Address not in this region. We must check whether the
9999 * region covers addresses in the same page as our address.
10000 * In that case we must not report a size that covers the
10001 * whole page for a subsequent hit against a different MPU
10002 * region or the background region, because it would result in
10003 * incorrect TLB hits for subsequent accesses to addresses that
10004 * are in this MPU region.
10005 */ 10006 if (limit >= base && 10007 ranges_overlap(base, limit - base + 1, 10008 addr_page_base, 10009 TARGET_PAGE_SIZE)) { 10010 *is_subpage = true; 10011 } 10012 continue; 10013 } 10014 10015 if (base > addr_page_base || limit < addr_page_limit) { 10016 *is_subpage = true; 10017 } 10018 10019 if (hit) { 10020 /* Multiple regions match -- always a failure (unlike 10021 * PMSAv7 where highest-numbered-region wins) 10022 */ 10023 fi->type = ARMFault_Permission; 10024 fi->level = 1; 10025 return true; 10026 } 10027 10028 matchregion = n; 10029 hit = true; 10030 } 10031 } 10032 10033 if (!hit) { 10034 /* background fault */ 10035 fi->type = ARMFault_Background; 10036 return true; 10037 } 10038 10039 if (matchregion == -1) { 10040 /* hit using the background region */ 10041 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 10042 } else { 10043 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); 10044 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); 10045 10046 if (m_is_system_region(env, address)) { 10047 /* System space is always execute never */ 10048 xn = 1; 10049 } 10050 10051 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 10052 if (*prot && !xn) { 10053 *prot |= PAGE_EXEC; 10054 } 10055 /* We don't need to look the attribute up in the MAIR0/MAIR1 10056 * registers because that only tells us about cacheability. 10057 */ 10058 if (mregion) { 10059 *mregion = matchregion; 10060 } 10061 } 10062 10063 fi->type = ARMFault_Permission; 10064 fi->level = 1; 10065 /* 10066 * Core QEMU code can't handle execution from small pages yet, so 10067 * don't try it. This means any attempted execution will generate 10068 * an MPU exception, rather than eventually causing QEMU to exit in 10069 * get_page_addr_code(). 10070 */ 10071 if (*is_subpage && (*prot & PAGE_EXEC)) { 10072 qemu_log_mask(LOG_UNIMP, 10073 "MPU: No support for execution from regions " 10074 "smaller than 1K\n"); 10075 *prot &= ~PAGE_EXEC; 10076 } 10077 return !(*prot & (1 << access_type)); 10078 } 10079 10080 10081 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, 10082 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10083 hwaddr *phys_ptr, MemTxAttrs *txattrs, 10084 int *prot, target_ulong *page_size, 10085 ARMMMUFaultInfo *fi) 10086 { 10087 uint32_t secure = regime_is_secure(env, mmu_idx); 10088 V8M_SAttributes sattrs = {}; 10089 bool ret; 10090 bool mpu_is_subpage; 10091 10092 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10093 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); 10094 if (access_type == MMU_INST_FETCH) { 10095 /* Instruction fetches always use the MMU bank and the 10096 * transaction attribute determined by the fetch address, 10097 * regardless of CPU state. This is painful for QEMU 10098 * to handle, because it would mean we need to encode 10099 * into the mmu_idx not just the (user, negpri) information 10100 * for the current security state but also that for the 10101 * other security state, which would balloon the number 10102 * of mmu_idx values needed alarmingly. 10103 * Fortunately we can avoid this because it's not actually 10104 * possible to arbitrarily execute code from memory with 10105 * the wrong security attribute: it will always generate 10106 * an exception of some kind or another, apart from the 10107 * special case of an NS CPU executing an SG instruction 10108 * in S&NSC memory. 
So we always just fail the translation 10109 * here and sort things out in the exception handler 10110 * (including possibly emulating an SG instruction). 10111 */ 10112 if (sattrs.ns != !secure) { 10113 if (sattrs.nsc) { 10114 fi->type = ARMFault_QEMU_NSCExec; 10115 } else { 10116 fi->type = ARMFault_QEMU_SFault; 10117 } 10118 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 10119 *phys_ptr = address; 10120 *prot = 0; 10121 return true; 10122 } 10123 } else { 10124 /* For data accesses we always use the MMU bank indicated 10125 * by the current CPU state, but the security attributes 10126 * might downgrade a secure access to nonsecure. 10127 */ 10128 if (sattrs.ns) { 10129 txattrs->secure = false; 10130 } else if (!secure) { 10131 /* NS access to S memory must fault. 10132 * Architecturally we should first check whether the 10133 * MPU information for this address indicates that we 10134 * are doing an unaligned access to Device memory, which 10135 * should generate a UsageFault instead. QEMU does not 10136 * currently check for that kind of unaligned access though. 10137 * If we added it we would need to do so as a special case 10138 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 10139 */ 10140 fi->type = ARMFault_QEMU_SFault; 10141 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 10142 *phys_ptr = address; 10143 *prot = 0; 10144 return true; 10145 } 10146 } 10147 } 10148 10149 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, 10150 txattrs, prot, &mpu_is_subpage, fi, NULL); 10151 /* 10152 * TODO: this is a temporary hack to ignore the fact that the SAU region 10153 * is smaller than a page if this is an executable region. We never 10154 * supported small MPU regions, but we did (accidentally) allow small 10155 * SAU regions, and if we now made small SAU regions not be executable 10156 * then this would break previously working guest code. We can't 10157 * remove this until/unless we implement support for execution from 10158 * small regions. 10159 */ 10160 if (*prot & PAGE_EXEC) { 10161 sattrs.subpage = false; 10162 } 10163 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; 10164 return ret; 10165 } 10166 10167 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, 10168 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10169 hwaddr *phys_ptr, int *prot, 10170 ARMMMUFaultInfo *fi) 10171 { 10172 int n; 10173 uint32_t mask; 10174 uint32_t base; 10175 bool is_user = regime_is_user(env, mmu_idx); 10176 10177 if (regime_translation_disabled(env, mmu_idx)) { 10178 /* MPU disabled. */ 10179 *phys_ptr = address; 10180 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10181 return false; 10182 } 10183 10184 *phys_ptr = address; 10185 for (n = 7; n >= 0; n--) { 10186 base = env->cp15.c6_region[n]; 10187 if ((base & 1) == 0) { 10188 continue; 10189 } 10190 mask = 1 << ((base >> 1) & 0x1f); 10191 /* Keep this shift separate from the above to avoid an 10192 (undefined) << 32. 
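 * (A worked case: a maximal 4GB region has a size field of 31, so
 * folding the two statements into one would evaluate 1 << 32, which is
 * undefined behaviour in C; computing 1 << 31 first and then
 * (mask << 1) - 1, with unsigned wraparound, yields the intended
 * all-ones mask 0xffffffff.)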
*/ 10193 mask = (mask << 1) - 1; 10194 if (((base ^ address) & ~mask) == 0) { 10195 break; 10196 } 10197 } 10198 if (n < 0) { 10199 fi->type = ARMFault_Background; 10200 return true; 10201 } 10202 10203 if (access_type == MMU_INST_FETCH) { 10204 mask = env->cp15.pmsav5_insn_ap; 10205 } else { 10206 mask = env->cp15.pmsav5_data_ap; 10207 } 10208 mask = (mask >> (n * 4)) & 0xf; 10209 switch (mask) { 10210 case 0: 10211 fi->type = ARMFault_Permission; 10212 fi->level = 1; 10213 return true; 10214 case 1: 10215 if (is_user) { 10216 fi->type = ARMFault_Permission; 10217 fi->level = 1; 10218 return true; 10219 } 10220 *prot = PAGE_READ | PAGE_WRITE; 10221 break; 10222 case 2: 10223 *prot = PAGE_READ; 10224 if (!is_user) { 10225 *prot |= PAGE_WRITE; 10226 } 10227 break; 10228 case 3: 10229 *prot = PAGE_READ | PAGE_WRITE; 10230 break; 10231 case 5: 10232 if (is_user) { 10233 fi->type = ARMFault_Permission; 10234 fi->level = 1; 10235 return true; 10236 } 10237 *prot = PAGE_READ; 10238 break; 10239 case 6: 10240 *prot = PAGE_READ; 10241 break; 10242 default: 10243 /* Bad permission. */ 10244 fi->type = ARMFault_Permission; 10245 fi->level = 1; 10246 return true; 10247 } 10248 *prot |= PAGE_EXEC; 10249 return false; 10250 } 10251 10252 /* Combine either inner or outer cacheability attributes for normal 10253 * memory, according to table D4-42 and pseudocode procedure 10254 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 10255 * 10256 * NB: only stage 1 includes allocation hints (RW bits), leading to 10257 * some asymmetry. 10258 */ 10259 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 10260 { 10261 if (s1 == 4 || s2 == 4) { 10262 /* non-cacheable has precedence */ 10263 return 4; 10264 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 10265 /* stage 1 write-through takes precedence */ 10266 return s1; 10267 } else if (extract32(s2, 2, 2) == 2) { 10268 /* stage 2 write-through takes precedence, but the allocation hint 10269 * is still taken from stage 1 10270 */ 10271 return (2 << 2) | extract32(s1, 0, 2); 10272 } else { /* write-back */ 10273 return s1; 10274 } 10275 } 10276 10277 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 10278 * and CombineS1S2Desc() 10279 * 10280 * @s1: Attributes from stage 1 walk 10281 * @s2: Attributes from stage 2 walk 10282 */ 10283 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) 10284 { 10285 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4); 10286 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4); 10287 ARMCacheAttrs ret; 10288 10289 /* Combine shareability attributes (table D4-43) */ 10290 if (s1.shareability == 2 || s2.shareability == 2) { 10291 /* if either are outer-shareable, the result is outer-shareable */ 10292 ret.shareability = 2; 10293 } else if (s1.shareability == 3 || s2.shareability == 3) { 10294 /* if either are inner-shareable, the result is inner-shareable */ 10295 ret.shareability = 3; 10296 } else { 10297 /* both non-shareable */ 10298 ret.shareability = 0; 10299 } 10300 10301 /* Combine memory type and cacheability attributes */ 10302 if (s1hi == 0 || s2hi == 0) { 10303 /* Device has precedence over normal */ 10304 if (s1lo == 0 || s2lo == 0) { 10305 /* nGnRnE has precedence over anything */ 10306 ret.attrs = 0; 10307 } else if (s1lo == 4 || s2lo == 4) { 10308 /* non-Reordering has precedence over Reordering */ 10309 ret.attrs = 4; /* nGnRE */ 10310 } else if (s1lo == 8 || s2lo == 8) { 10311 /* 
non-Gathering has precedence over Gathering */
10312 ret.attrs = 8; /* nGRE */
10313 } else {
10314 ret.attrs = 0xc; /* GRE */
10315 }
10316
10317 /* Any location for which the resultant memory type is any
10318 * type of Device memory is always treated as Outer Shareable.
10319 */
10320 ret.shareability = 2;
10321 } else { /* Normal memory */
10322 /* Outer/inner cacheability combine independently */
10323 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
10324 | combine_cacheattr_nibble(s1lo, s2lo);
10325
10326 if (ret.attrs == 0x44) {
10327 /* Any location for which the resultant memory type is Normal
10328 * Inner Non-cacheable, Outer Non-cacheable is always treated
10329 * as Outer Shareable.
10330 */
10331 ret.shareability = 2;
10332 }
10333 }
10334
10335 return ret;
10336 }
10337
10338
10339 /* get_phys_addr - get the physical address for this virtual address
10340 *
10341 * Find the physical address corresponding to the given virtual address,
10342 * by doing a translation table walk on MMU based systems or using the
10343 * MPU state on MPU based systems.
10344 *
10345 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
10346 * prot and page_size may not be filled in, and the populated fault info (fi)
10347 * provides information on why the translation aborted, in the format of a
10348 * DFSR/IFSR fault register, with the following caveats:
10349 * * we honour the short vs long DFSR format differences.
10350 * * the WnR bit is never set (the caller must do this).
10351 * * for PMSAv5 based systems we don't bother to return a full FSR format
10352 * value.
10353 *
10354 * @env: CPUARMState
10355 * @address: virtual address to get physical address for
10356 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
10357 * @mmu_idx: MMU index indicating required translation regime
10358 * @phys_ptr: set to the physical address corresponding to the virtual address
10359 * @attrs: set to the memory transaction attributes to use
10360 * @prot: set to the permissions for the page containing phys_ptr
10361 * @page_size: set to the size of the page containing phys_ptr
10362 * @fi: set to fault info if the translation fails
10363 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
10364 */
10365 static bool get_phys_addr(CPUARMState *env, target_ulong address,
10366 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10367 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
10368 target_ulong *page_size,
10369 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
10370 {
10371 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
10372 /* Call ourselves recursively to do the stage 1 and then stage 2
10373 * translations.
10374 */
10375 if (arm_feature(env, ARM_FEATURE_EL2)) {
10376 hwaddr ipa;
10377 int s2_prot;
10378 int ret;
10379 ARMCacheAttrs cacheattrs2 = {};
10380
10381 ret = get_phys_addr(env, address, access_type,
10382 stage_1_mmu_idx(mmu_idx), &ipa, attrs,
10383 prot, page_size, fi, cacheattrs);
10384
10385 /* If S1 fails or S2 is disabled, return early. */
10386 if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
10387 *phys_ptr = ipa;
10388 return ret;
10389 }
10390
10391 /* S1 is done. Now do S2 translation. */
10392 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
10393 phys_ptr, attrs, &s2_prot,
10394 page_size, fi,
10395 cacheattrs != NULL ? &cacheattrs2 : NULL);
10396 fi->s2addr = ipa;
10397 /* Combine the S1 and S2 perms.
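 * (The AND below keeps a permission only if both stages grant it:
 * e.g. stage 1 RWX combined with stage 2 RX leaves RX.)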
 */
10398 *prot &= s2_prot;
10399
10400 /* Combine the S1 and S2 cache attributes, if needed */
10401 if (!ret && cacheattrs != NULL) {
10402 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
10403 }
10404
10405 return ret;
10406 } else {
10407 /*
10408 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
10409 */
10410 mmu_idx = stage_1_mmu_idx(mmu_idx);
10411 }
10412 }
10413
10414 /* The page table entries may downgrade secure to non-secure, but
10415 * cannot upgrade a non-secure translation regime's attributes
10416 * to secure.
10417 */
10418 attrs->secure = regime_is_secure(env, mmu_idx);
10419 attrs->user = regime_is_user(env, mmu_idx);
10420
10421 /* Fast Context Switch Extension. This doesn't exist at all in v8.
10422 * In v7 and earlier it affects all stage 1 translations.
10423 */
10424 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
10425 && !arm_feature(env, ARM_FEATURE_V8)) {
10426 if (regime_el(env, mmu_idx) == 3) {
10427 address += env->cp15.fcseidr_s;
10428 } else {
10429 address += env->cp15.fcseidr_ns;
10430 }
10431 }
10432
10433 if (arm_feature(env, ARM_FEATURE_PMSA)) {
10434 bool ret;
10435 *page_size = TARGET_PAGE_SIZE;
10436
10437 if (arm_feature(env, ARM_FEATURE_V8)) {
10438 /* PMSAv8 */
10439 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
10440 phys_ptr, attrs, prot, page_size, fi);
10441 } else if (arm_feature(env, ARM_FEATURE_V7)) {
10442 /* PMSAv7 */
10443 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
10444 phys_ptr, prot, page_size, fi);
10445 } else {
10446 /* Pre-v7 MPU */
10447 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
10448 phys_ptr, prot, fi);
10449 }
10450 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
10451 " mmu_idx %u -> %s (prot %c%c%c)\n",
10452 access_type == MMU_DATA_LOAD ? "reading" :
10453 (access_type == MMU_DATA_STORE ? "writing" : "execute"),
10454 (uint32_t)address, mmu_idx,
10455 ret ? "Miss" : "Hit",
10456 *prot & PAGE_READ ? 'r' : '-',
10457 *prot & PAGE_WRITE ? 'w' : '-',
10458 *prot & PAGE_EXEC ? 'x' : '-');
10459
10460 return ret;
10461 }
10462
10463 /* Definitely a real MMU, not an MPU */
10464
10465 if (regime_translation_disabled(env, mmu_idx)) {
10466 /* MMU disabled. */
10467 *phys_ptr = address;
10468 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
10469 *page_size = TARGET_PAGE_SIZE;
10470 return false;
10471 }
10472
10473 if (regime_using_lpae_format(env, mmu_idx)) {
10474 return get_phys_addr_lpae(env, address, access_type, mmu_idx,
10475 phys_ptr, attrs, prot, page_size,
10476 fi, cacheattrs);
10477 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
10478 return get_phys_addr_v6(env, address, access_type, mmu_idx,
10479 phys_ptr, attrs, prot, page_size, fi);
10480 } else {
10481 return get_phys_addr_v5(env, address, access_type, mmu_idx,
10482 phys_ptr, prot, page_size, fi);
10483 }
10484 }
10485
10486 /* Walk the page table and (if the mapping exists) add the page
10487 * to the TLB. Return false on success, or true on failure. Populate
10488 * fi with fault information on failure.
10489 */
10490 bool arm_tlb_fill(CPUState *cs, vaddr address,
10491 MMUAccessType access_type, int mmu_idx,
10492 ARMMMUFaultInfo *fi)
10493 {
10494 ARMCPU *cpu = ARM_CPU(cs);
10495 CPUARMState *env = &cpu->env;
10496 hwaddr phys_addr;
10497 target_ulong page_size;
10498 int prot;
10499 int ret;
10500 MemTxAttrs attrs = {};
10501
10502 ret = get_phys_addr(env, address, access_type,
10503 core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
10504 &attrs, &prot, &page_size, fi, NULL);
10505 if (!ret) {
10506 /*
10507 * Map a single [sub]page. Regions smaller than our declared
10508 * target page size are handled specially, so for those we
10509 * pass in the exact addresses.
10510 */
10511 if (page_size >= TARGET_PAGE_SIZE) {
10512 phys_addr &= TARGET_PAGE_MASK;
10513 address &= TARGET_PAGE_MASK;
10514 }
10515 tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
10516 prot, mmu_idx, page_size);
10517 return false;
10518 }
10519
10520 return ret;
10521 }
10522
10523 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
10524 MemTxAttrs *attrs)
10525 {
10526 ARMCPU *cpu = ARM_CPU(cs);
10527 CPUARMState *env = &cpu->env;
10528 hwaddr phys_addr;
10529 target_ulong page_size;
10530 int prot;
10531 bool ret;
10532 ARMMMUFaultInfo fi = {};
10533 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
10534
10535 *attrs = (MemTxAttrs) {};
10536
10537 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
10538 attrs, &prot, &page_size, &fi, NULL);
10539
10540 if (ret) {
10541 return -1;
10542 }
10543 return phys_addr;
10544 }
10545
10546 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
10547 {
10548 uint32_t mask;
10549 unsigned el = arm_current_el(env);
10550
10551 /* First handle registers which unprivileged can read */
10552
10553 switch (reg) {
10554 case 0 ... 7: /* xPSR sub-fields */
10555 mask = 0;
10556 if ((reg & 1) && el) {
10557 mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
10558 }
10559 if (!(reg & 4)) {
10560 mask |= XPSR_NZCV | XPSR_Q; /* APSR */
10561 }
10562 /* EPSR reads as zero */
10563 return xpsr_read(env) & mask;
10565 case 20: /* CONTROL */
10566 return env->v7m.control[env->v7m.secure];
10567 case 0x94: /* CONTROL_NS */
10568 /* We have to handle this here because unprivileged Secure code
10569 * can read the NS CONTROL register.
10570 */ 10571 if (!env->v7m.secure) { 10572 return 0; 10573 } 10574 return env->v7m.control[M_REG_NS]; 10575 } 10576 10577 if (el == 0) { 10578 return 0; /* unprivileged reads others as zero */ 10579 } 10580 10581 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10582 switch (reg) { 10583 case 0x88: /* MSP_NS */ 10584 if (!env->v7m.secure) { 10585 return 0; 10586 } 10587 return env->v7m.other_ss_msp; 10588 case 0x89: /* PSP_NS */ 10589 if (!env->v7m.secure) { 10590 return 0; 10591 } 10592 return env->v7m.other_ss_psp; 10593 case 0x8a: /* MSPLIM_NS */ 10594 if (!env->v7m.secure) { 10595 return 0; 10596 } 10597 return env->v7m.msplim[M_REG_NS]; 10598 case 0x8b: /* PSPLIM_NS */ 10599 if (!env->v7m.secure) { 10600 return 0; 10601 } 10602 return env->v7m.psplim[M_REG_NS]; 10603 case 0x90: /* PRIMASK_NS */ 10604 if (!env->v7m.secure) { 10605 return 0; 10606 } 10607 return env->v7m.primask[M_REG_NS]; 10608 case 0x91: /* BASEPRI_NS */ 10609 if (!env->v7m.secure) { 10610 return 0; 10611 } 10612 return env->v7m.basepri[M_REG_NS]; 10613 case 0x93: /* FAULTMASK_NS */ 10614 if (!env->v7m.secure) { 10615 return 0; 10616 } 10617 return env->v7m.faultmask[M_REG_NS]; 10618 case 0x98: /* SP_NS */ 10619 { 10620 /* This gives the non-secure SP selected based on whether we're 10621 * currently in handler mode or not, using the NS CONTROL.SPSEL. 10622 */ 10623 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; 10624 10625 if (!env->v7m.secure) { 10626 return 0; 10627 } 10628 if (!arm_v7m_is_handler_mode(env) && spsel) { 10629 return env->v7m.other_ss_psp; 10630 } else { 10631 return env->v7m.other_ss_msp; 10632 } 10633 } 10634 default: 10635 break; 10636 } 10637 } 10638 10639 switch (reg) { 10640 case 8: /* MSP */ 10641 return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13]; 10642 case 9: /* PSP */ 10643 return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp; 10644 case 10: /* MSPLIM */ 10645 if (!arm_feature(env, ARM_FEATURE_V8)) { 10646 goto bad_reg; 10647 } 10648 return env->v7m.msplim[env->v7m.secure]; 10649 case 11: /* PSPLIM */ 10650 if (!arm_feature(env, ARM_FEATURE_V8)) { 10651 goto bad_reg; 10652 } 10653 return env->v7m.psplim[env->v7m.secure]; 10654 case 16: /* PRIMASK */ 10655 return env->v7m.primask[env->v7m.secure]; 10656 case 17: /* BASEPRI */ 10657 case 18: /* BASEPRI_MAX */ 10658 return env->v7m.basepri[env->v7m.secure]; 10659 case 19: /* FAULTMASK */ 10660 return env->v7m.faultmask[env->v7m.secure]; 10661 default: 10662 bad_reg: 10663 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special" 10664 " register %d\n", reg); 10665 return 0; 10666 } 10667 } 10668 10669 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) 10670 { 10671 /* We're passed bits [11..0] of the instruction; extract 10672 * SYSm and the mask bits. 10673 * Invalid combinations of SYSm and mask are UNPREDICTABLE; 10674 * we choose to treat them as if the mask bits were valid. 10675 * NB that the pseudocode 'mask' variable is bits [11..10], 10676 * whereas ours is [11..8]. 
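 * (Worked example: MSR APSR_nzcvq encodes SYSm = 0 in bits [7:0] and
 * mask<1:0> = 0b10 in bits [11:10], so maskreg arrives as 0x800; the
 * extract32() calls below then give mask = 8 and reg = 0, and the
 * 'mask & 8' test selects the NZCVQ write.)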
10677 */ 10678 uint32_t mask = extract32(maskreg, 8, 4); 10679 uint32_t reg = extract32(maskreg, 0, 8); 10680 10681 if (arm_current_el(env) == 0 && reg > 7) { 10682 /* only xPSR sub-fields may be written by unprivileged */ 10683 return; 10684 } 10685 10686 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10687 switch (reg) { 10688 case 0x88: /* MSP_NS */ 10689 if (!env->v7m.secure) { 10690 return; 10691 } 10692 env->v7m.other_ss_msp = val; 10693 return; 10694 case 0x89: /* PSP_NS */ 10695 if (!env->v7m.secure) { 10696 return; 10697 } 10698 env->v7m.other_ss_psp = val; 10699 return; 10700 case 0x8a: /* MSPLIM_NS */ 10701 if (!env->v7m.secure) { 10702 return; 10703 } 10704 env->v7m.msplim[M_REG_NS] = val & ~7; 10705 return; 10706 case 0x8b: /* PSPLIM_NS */ 10707 if (!env->v7m.secure) { 10708 return; 10709 } 10710 env->v7m.psplim[M_REG_NS] = val & ~7; 10711 return; 10712 case 0x90: /* PRIMASK_NS */ 10713 if (!env->v7m.secure) { 10714 return; 10715 } 10716 env->v7m.primask[M_REG_NS] = val & 1; 10717 return; 10718 case 0x91: /* BASEPRI_NS */ 10719 if (!env->v7m.secure) { 10720 return; 10721 } 10722 env->v7m.basepri[M_REG_NS] = val & 0xff; 10723 return; 10724 case 0x93: /* FAULTMASK_NS */ 10725 if (!env->v7m.secure) { 10726 return; 10727 } 10728 env->v7m.faultmask[M_REG_NS] = val & 1; 10729 return; 10730 case 0x94: /* CONTROL_NS */ 10731 if (!env->v7m.secure) { 10732 return; 10733 } 10734 write_v7m_control_spsel_for_secstate(env, 10735 val & R_V7M_CONTROL_SPSEL_MASK, 10736 M_REG_NS); 10737 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK; 10738 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK; 10739 return; 10740 case 0x98: /* SP_NS */ 10741 { 10742 /* This gives the non-secure SP selected based on whether we're 10743 * currently in handler mode or not, using the NS CONTROL.SPSEL. 10744 */ 10745 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; 10746 10747 if (!env->v7m.secure) { 10748 return; 10749 } 10750 if (!arm_v7m_is_handler_mode(env) && spsel) { 10751 env->v7m.other_ss_psp = val; 10752 } else { 10753 env->v7m.other_ss_msp = val; 10754 } 10755 return; 10756 } 10757 default: 10758 break; 10759 } 10760 } 10761 10762 switch (reg) { 10763 case 0 ... 
7: /* xPSR sub-fields */ 10764 /* only APSR is actually writable */ 10765 if (!(reg & 4)) { 10766 uint32_t apsrmask = 0; 10767 10768 if (mask & 8) { 10769 apsrmask |= XPSR_NZCV | XPSR_Q; 10770 } 10771 if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) { 10772 apsrmask |= XPSR_GE; 10773 } 10774 xpsr_write(env, val, apsrmask); 10775 } 10776 break; 10777 case 8: /* MSP */ 10778 if (v7m_using_psp(env)) { 10779 env->v7m.other_sp = val; 10780 } else { 10781 env->regs[13] = val; 10782 } 10783 break; 10784 case 9: /* PSP */ 10785 if (v7m_using_psp(env)) { 10786 env->regs[13] = val; 10787 } else { 10788 env->v7m.other_sp = val; 10789 } 10790 break; 10791 case 10: /* MSPLIM */ 10792 if (!arm_feature(env, ARM_FEATURE_V8)) { 10793 goto bad_reg; 10794 } 10795 env->v7m.msplim[env->v7m.secure] = val & ~7; 10796 break; 10797 case 11: /* PSPLIM */ 10798 if (!arm_feature(env, ARM_FEATURE_V8)) { 10799 goto bad_reg; 10800 } 10801 env->v7m.psplim[env->v7m.secure] = val & ~7; 10802 break; 10803 case 16: /* PRIMASK */ 10804 env->v7m.primask[env->v7m.secure] = val & 1; 10805 break; 10806 case 17: /* BASEPRI */ 10807 env->v7m.basepri[env->v7m.secure] = val & 0xff; 10808 break; 10809 case 18: /* BASEPRI_MAX */ 10810 val &= 0xff; 10811 if (val != 0 && (val < env->v7m.basepri[env->v7m.secure] 10812 || env->v7m.basepri[env->v7m.secure] == 0)) { 10813 env->v7m.basepri[env->v7m.secure] = val; 10814 } 10815 break; 10816 case 19: /* FAULTMASK */ 10817 env->v7m.faultmask[env->v7m.secure] = val & 1; 10818 break; 10819 case 20: /* CONTROL */ 10820 /* Writing to the SPSEL bit only has an effect if we are in 10821 * thread mode; other bits can be updated by any privileged code. 10822 * write_v7m_control_spsel() deals with updating the SPSEL bit in 10823 * env->v7m.control, so we only need update the others. 10824 * For v7M, we must just ignore explicit writes to SPSEL in handler 10825 * mode; for v8M the write is permitted but will have no effect. 10826 */ 10827 if (arm_feature(env, ARM_FEATURE_V8) || 10828 !arm_v7m_is_handler_mode(env)) { 10829 write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); 10830 } 10831 env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK; 10832 env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK; 10833 break; 10834 default: 10835 bad_reg: 10836 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special" 10837 " register %d\n", reg); 10838 return; 10839 } 10840 } 10841 10842 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) 10843 { 10844 /* Implement the TT instruction. op is bits [7:6] of the insn. */ 10845 bool forceunpriv = op & 1; 10846 bool alt = op & 2; 10847 V8M_SAttributes sattrs = {}; 10848 uint32_t tt_resp; 10849 bool r, rw, nsr, nsrw, mrvalid; 10850 int prot; 10851 ARMMMUFaultInfo fi = {}; 10852 MemTxAttrs attrs = {}; 10853 hwaddr phys_addr; 10854 ARMMMUIdx mmu_idx; 10855 uint32_t mregion; 10856 bool targetpriv; 10857 bool targetsec = env->v7m.secure; 10858 bool is_subpage; 10859 10860 /* Work out what the security state and privilege level we're 10861 * interested in is... 
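 * (op encodes the four instruction forms: TT = 0, TTT = 1 (force
 * unprivileged), TTA = 2 (alternate security state), TTAT = 3 (both).)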
10862 */ 10863 if (alt) { 10864 targetsec = !targetsec; 10865 } 10866 10867 if (forceunpriv) { 10868 targetpriv = false; 10869 } else { 10870 targetpriv = arm_v7m_is_handler_mode(env) || 10871 !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK); 10872 } 10873 10874 /* ...and then figure out which MMU index this is */ 10875 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv); 10876 10877 /* We know that the MPU and SAU don't care about the access type 10878 * for our purposes beyond that we don't want to claim to be 10879 * an insn fetch, so we arbitrarily call this a read. 10880 */ 10881 10882 /* MPU region info only available for privileged or if 10883 * inspecting the other MPU state. 10884 */ 10885 if (arm_current_el(env) != 0 || alt) { 10886 /* We can ignore the return value as prot is always set */ 10887 pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, 10888 &phys_addr, &attrs, &prot, &is_subpage, 10889 &fi, &mregion); 10890 if (mregion == -1) { 10891 mrvalid = false; 10892 mregion = 0; 10893 } else { 10894 mrvalid = true; 10895 } 10896 r = prot & PAGE_READ; 10897 rw = prot & PAGE_WRITE; 10898 } else { 10899 r = false; 10900 rw = false; 10901 mrvalid = false; 10902 mregion = 0; 10903 } 10904 10905 if (env->v7m.secure) { 10906 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); 10907 nsr = sattrs.ns && r; 10908 nsrw = sattrs.ns && rw; 10909 } else { 10910 sattrs.ns = true; 10911 nsr = false; 10912 nsrw = false; 10913 } 10914 10915 tt_resp = (sattrs.iregion << 24) | 10916 (sattrs.irvalid << 23) | 10917 ((!sattrs.ns) << 22) | 10918 (nsrw << 21) | 10919 (nsr << 20) | 10920 (rw << 19) | 10921 (r << 18) | 10922 (sattrs.srvalid << 17) | 10923 (mrvalid << 16) | 10924 (sattrs.sregion << 8) | 10925 mregion; 10926 10927 return tt_resp; 10928 } 10929 10930 #endif 10931 10932 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) 10933 { 10934 /* Implement DC ZVA, which zeroes a fixed-length block of memory. 10935 * Note that we do not implement the (architecturally mandated) 10936 * alignment fault for attempts to use this on Device memory 10937 * (which matches the usual QEMU behaviour of not implementing either 10938 * alignment faults or any memory attribute handling). 10939 */ 10940 10941 ARMCPU *cpu = arm_env_get_cpu(env); 10942 uint64_t blocklen = 4 << cpu->dcz_blocksize; 10943 uint64_t vaddr = vaddr_in & ~(blocklen - 1); 10944 10945 #ifndef CONFIG_USER_ONLY 10946 { 10947 /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than 10948 * the block size so we might have to do more than one TLB lookup. 10949 * We know that in fact for any v8 CPU the page size is at least 4K 10950 * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only 10951 * 1K as an artefact of legacy v5 subpage support being present in the 10952 * same QEMU executable. 10953 */ 10954 int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE); 10955 void *hostaddr[maxidx]; 10956 int try, i; 10957 unsigned mmu_idx = cpu_mmu_index(env, false); 10958 TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); 10959 10960 for (try = 0; try < 2; try++) { 10961 10962 for (i = 0; i < maxidx; i++) { 10963 hostaddr[i] = tlb_vaddr_to_host(env, 10964 vaddr + TARGET_PAGE_SIZE * i, 10965 1, mmu_idx); 10966 if (!hostaddr[i]) { 10967 break; 10968 } 10969 } 10970 if (i == maxidx) { 10971 /* If it's all in the TLB it's fair game for just writing to; 10972 * we know we don't need to update dirty status, etc. 
10973 */ 10974 for (i = 0; i < maxidx - 1; i++) { 10975 memset(hostaddr[i], 0, TARGET_PAGE_SIZE); 10976 } 10977 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE)); 10978 return; 10979 } 10980 /* OK, try a store and see if we can populate the tlb. This 10981 * might cause an exception if the memory isn't writable, 10982 * in which case we will longjmp out of here. We must for 10983 * this purpose use the actual register value passed to us 10984 * so that we get the fault address right. 10985 */ 10986 helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC()); 10987 /* Now we can populate the other TLB entries, if any */ 10988 for (i = 0; i < maxidx; i++) { 10989 uint64_t va = vaddr + TARGET_PAGE_SIZE * i; 10990 if (va != (vaddr_in & TARGET_PAGE_MASK)) { 10991 helper_ret_stb_mmu(env, va, 0, oi, GETPC()); 10992 } 10993 } 10994 } 10995 10996 /* Slow path (probably attempt to do this to an I/O device or 10997 * similar, or clearing of a block of code we have translations 10998 * cached for). Just do a series of byte writes as the architecture 10999 * demands. It's not worth trying to use a cpu_physical_memory_map(), 11000 * memset(), unmap() sequence here because: 11001 * + we'd need to account for the blocksize being larger than a page 11002 * + the direct-RAM access case is almost always going to be dealt 11003 * with in the fastpath code above, so there's no speed benefit 11004 * + we would have to deal with the map returning NULL because the 11005 * bounce buffer was in use 11006 */ 11007 for (i = 0; i < blocklen; i++) { 11008 helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC()); 11009 } 11010 } 11011 #else 11012 memset(g2h(vaddr), 0, blocklen); 11013 #endif 11014 } 11015 11016 /* Note that signed overflow is undefined in C. The following routines are 11017 careful to use unsigned types where modulo arithmetic is required. 11018 Failure to do so _will_ break on newer gcc. */ 11019 11020 /* Signed saturating arithmetic. */ 11021 11022 /* Perform 16-bit signed saturating addition. */ 11023 static inline uint16_t add16_sat(uint16_t a, uint16_t b) 11024 { 11025 uint16_t res; 11026 11027 res = a + b; 11028 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { 11029 if (a & 0x8000) 11030 res = 0x8000; 11031 else 11032 res = 0x7fff; 11033 } 11034 return res; 11035 } 11036 11037 /* Perform 8-bit signed saturating addition. */ 11038 static inline uint8_t add8_sat(uint8_t a, uint8_t b) 11039 { 11040 uint8_t res; 11041 11042 res = a + b; 11043 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { 11044 if (a & 0x80) 11045 res = 0x80; 11046 else 11047 res = 0x7f; 11048 } 11049 return res; 11050 } 11051 11052 /* Perform 16-bit signed saturating subtraction. */ 11053 static inline uint16_t sub16_sat(uint16_t a, uint16_t b) 11054 { 11055 uint16_t res; 11056 11057 res = a - b; 11058 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { 11059 if (a & 0x8000) 11060 res = 0x8000; 11061 else 11062 res = 0x7fff; 11063 } 11064 return res; 11065 } 11066 11067 /* Perform 8-bit signed saturating subtraction. 
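 * (The four saturating add/sub helpers in this group share a sign
 * trick: overflow has occurred iff the result's sign differs from a's
 * while the operands' signs were equal (addition) or different
 * (subtraction). For example sub16_sat(0x8000, 0x0001) computes
 * res = 0x7fff, both tests fire, and the result saturates back to
 * 0x8000.)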
*/ 11068 static inline uint8_t sub8_sat(uint8_t a, uint8_t b) 11069 { 11070 uint8_t res; 11071 11072 res = a - b; 11073 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { 11074 if (a & 0x80) 11075 res = 0x80; 11076 else 11077 res = 0x7f; 11078 } 11079 return res; 11080 } 11081 11082 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); 11083 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); 11084 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); 11085 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); 11086 #define PFX q 11087 11088 #include "op_addsub.h" 11089 11090 /* Unsigned saturating arithmetic. */ 11091 static inline uint16_t add16_usat(uint16_t a, uint16_t b) 11092 { 11093 uint16_t res; 11094 res = a + b; 11095 if (res < a) 11096 res = 0xffff; 11097 return res; 11098 } 11099 11100 static inline uint16_t sub16_usat(uint16_t a, uint16_t b) 11101 { 11102 if (a > b) 11103 return a - b; 11104 else 11105 return 0; 11106 } 11107 11108 static inline uint8_t add8_usat(uint8_t a, uint8_t b) 11109 { 11110 uint8_t res; 11111 res = a + b; 11112 if (res < a) 11113 res = 0xff; 11114 return res; 11115 } 11116 11117 static inline uint8_t sub8_usat(uint8_t a, uint8_t b) 11118 { 11119 if (a > b) 11120 return a - b; 11121 else 11122 return 0; 11123 } 11124 11125 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); 11126 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); 11127 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); 11128 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); 11129 #define PFX uq 11130 11131 #include "op_addsub.h" 11132 11133 /* Signed modulo arithmetic. */ 11134 #define SARITH16(a, b, n, op) do { \ 11135 int32_t sum; \ 11136 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ 11137 RESULT(sum, n, 16); \ 11138 if (sum >= 0) \ 11139 ge |= 3 << (n * 2); \ 11140 } while(0) 11141 11142 #define SARITH8(a, b, n, op) do { \ 11143 int32_t sum; \ 11144 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ 11145 RESULT(sum, n, 8); \ 11146 if (sum >= 0) \ 11147 ge |= 1 << n; \ 11148 } while(0) 11149 11150 11151 #define ADD16(a, b, n) SARITH16(a, b, n, +) 11152 #define SUB16(a, b, n) SARITH16(a, b, n, -) 11153 #define ADD8(a, b, n) SARITH8(a, b, n, +) 11154 #define SUB8(a, b, n) SARITH8(a, b, n, -) 11155 #define PFX s 11156 #define ARITH_GE 11157 11158 #include "op_addsub.h" 11159 11160 /* Unsigned modulo arithmetic. */ 11161 #define ADD16(a, b, n) do { \ 11162 uint32_t sum; \ 11163 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ 11164 RESULT(sum, n, 16); \ 11165 if ((sum >> 16) == 1) \ 11166 ge |= 3 << (n * 2); \ 11167 } while(0) 11168 11169 #define ADD8(a, b, n) do { \ 11170 uint32_t sum; \ 11171 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ 11172 RESULT(sum, n, 8); \ 11173 if ((sum >> 8) == 1) \ 11174 ge |= 1 << n; \ 11175 } while(0) 11176 11177 #define SUB16(a, b, n) do { \ 11178 uint32_t sum; \ 11179 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ 11180 RESULT(sum, n, 16); \ 11181 if ((sum >> 16) == 0) \ 11182 ge |= 3 << (n * 2); \ 11183 } while(0) 11184 11185 #define SUB8(a, b, n) do { \ 11186 uint32_t sum; \ 11187 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ 11188 RESULT(sum, n, 8); \ 11189 if ((sum >> 8) == 0) \ 11190 ge |= 1 << n; \ 11191 } while(0) 11192 11193 #define PFX u 11194 #define ARITH_GE 11195 11196 #include "op_addsub.h" 11197 11198 /* Halved signed arithmetic. 
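 * (The sums below are formed in int32_t so the carry out of the top
 * bit is kept before the >> 1: e.g. SHADD16 of 0x7fff + 0x7fff forms
 * 0xfffe and halves it to 0x7fff, with no 16-bit wraparound.)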
*/ 11199 #define ADD16(a, b, n) \ 11200 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) 11201 #define SUB16(a, b, n) \ 11202 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) 11203 #define ADD8(a, b, n) \ 11204 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) 11205 #define SUB8(a, b, n) \ 11206 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) 11207 #define PFX sh 11208 11209 #include "op_addsub.h" 11210 11211 /* Halved unsigned arithmetic. */ 11212 #define ADD16(a, b, n) \ 11213 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) 11214 #define SUB16(a, b, n) \ 11215 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) 11216 #define ADD8(a, b, n) \ 11217 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) 11218 #define SUB8(a, b, n) \ 11219 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) 11220 #define PFX uh 11221 11222 #include "op_addsub.h" 11223 11224 static inline uint8_t do_usad(uint8_t a, uint8_t b) 11225 { 11226 if (a > b) 11227 return a - b; 11228 else 11229 return b - a; 11230 } 11231 11232 /* Unsigned sum of absolute byte differences. */ 11233 uint32_t HELPER(usad8)(uint32_t a, uint32_t b) 11234 { 11235 uint32_t sum; 11236 sum = do_usad(a, b); 11237 sum += do_usad(a >> 8, b >> 8); 11238 sum += do_usad(a >> 16, b >>16); 11239 sum += do_usad(a >> 24, b >> 24); 11240 return sum; 11241 } 11242 11243 /* For ARMv6 SEL instruction. */ 11244 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) 11245 { 11246 uint32_t mask; 11247 11248 mask = 0; 11249 if (flags & 1) 11250 mask |= 0xff; 11251 if (flags & 2) 11252 mask |= 0xff00; 11253 if (flags & 4) 11254 mask |= 0xff0000; 11255 if (flags & 8) 11256 mask |= 0xff000000; 11257 return (a & mask) | (b & ~mask); 11258 } 11259 11260 /* VFP support. We follow the convention used for VFP instructions: 11261 Single precision routines have a "s" suffix, double precision a 11262 "d" suffix. */ 11263 11264 /* Convert host exception flags to vfp form. */ 11265 static inline int vfp_exceptbits_from_host(int host_bits) 11266 { 11267 int target_bits = 0; 11268 11269 if (host_bits & float_flag_invalid) 11270 target_bits |= 1; 11271 if (host_bits & float_flag_divbyzero) 11272 target_bits |= 2; 11273 if (host_bits & float_flag_overflow) 11274 target_bits |= 4; 11275 if (host_bits & (float_flag_underflow | float_flag_output_denormal)) 11276 target_bits |= 8; 11277 if (host_bits & float_flag_inexact) 11278 target_bits |= 0x10; 11279 if (host_bits & float_flag_input_denormal) 11280 target_bits |= 0x80; 11281 return target_bits; 11282 } 11283 11284 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) 11285 { 11286 int i; 11287 uint32_t fpscr; 11288 11289 fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff) 11290 | (env->vfp.vec_len << 16) 11291 | (env->vfp.vec_stride << 20); 11292 i = get_float_exception_flags(&env->vfp.fp_status); 11293 i |= get_float_exception_flags(&env->vfp.standard_fp_status); 11294 i |= get_float_exception_flags(&env->vfp.fp_status_f16); 11295 fpscr |= vfp_exceptbits_from_host(i); 11296 return fpscr; 11297 } 11298 11299 uint32_t vfp_get_fpscr(CPUARMState *env) 11300 { 11301 return HELPER(vfp_get_fpscr)(env); 11302 } 11303 11304 /* Convert vfp exception flags to target form. 
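 * (This is the inverse of vfp_exceptbits_from_host() above; the FPSCR
 * cumulative exception bits are IOC (bit 0), DZC (bit 1), OFC (bit 2),
 * UFC (bit 3), IXC (bit 4) and IDC (bit 7).)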
*/ 11305 static inline int vfp_exceptbits_to_host(int target_bits) 11306 { 11307 int host_bits = 0; 11308 11309 if (target_bits & 1) 11310 host_bits |= float_flag_invalid; 11311 if (target_bits & 2) 11312 host_bits |= float_flag_divbyzero; 11313 if (target_bits & 4) 11314 host_bits |= float_flag_overflow; 11315 if (target_bits & 8) 11316 host_bits |= float_flag_underflow; 11317 if (target_bits & 0x10) 11318 host_bits |= float_flag_inexact; 11319 if (target_bits & 0x80) 11320 host_bits |= float_flag_input_denormal; 11321 return host_bits; 11322 } 11323 11324 void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) 11325 { 11326 int i; 11327 uint32_t changed; 11328 11329 changed = env->vfp.xregs[ARM_VFP_FPSCR]; 11330 env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff); 11331 env->vfp.vec_len = (val >> 16) & 7; 11332 env->vfp.vec_stride = (val >> 20) & 3; 11333 11334 changed ^= val; 11335 if (changed & (3 << 22)) { 11336 i = (val >> 22) & 3; 11337 switch (i) { 11338 case FPROUNDING_TIEEVEN: 11339 i = float_round_nearest_even; 11340 break; 11341 case FPROUNDING_POSINF: 11342 i = float_round_up; 11343 break; 11344 case FPROUNDING_NEGINF: 11345 i = float_round_down; 11346 break; 11347 case FPROUNDING_ZERO: 11348 i = float_round_to_zero; 11349 break; 11350 } 11351 set_float_rounding_mode(i, &env->vfp.fp_status); 11352 set_float_rounding_mode(i, &env->vfp.fp_status_f16); 11353 } 11354 if (changed & FPCR_FZ16) { 11355 bool ftz_enabled = val & FPCR_FZ16; 11356 set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16); 11357 set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16); 11358 } 11359 if (changed & FPCR_FZ) { 11360 bool ftz_enabled = val & FPCR_FZ; 11361 set_flush_to_zero(ftz_enabled, &env->vfp.fp_status); 11362 set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status); 11363 } 11364 if (changed & FPCR_DN) { 11365 bool dnan_enabled = val & FPCR_DN; 11366 set_default_nan_mode(dnan_enabled, &env->vfp.fp_status); 11367 set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16); 11368 } 11369 11370 /* The exception flags are ORed together when we read fpscr so we 11371 * only need to preserve the current state in one of our 11372 * float_status values. 
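 * (The incoming cumulative bits are parked in fp_status below, while
 * the f16 and standard statuses are cleared; vfp_get_fpscr() ORs all
 * three sets of flags back together on the next read.)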
11373 */ 11374 i = vfp_exceptbits_to_host(val); 11375 set_float_exception_flags(i, &env->vfp.fp_status); 11376 set_float_exception_flags(0, &env->vfp.fp_status_f16); 11377 set_float_exception_flags(0, &env->vfp.standard_fp_status); 11378 } 11379 11380 void vfp_set_fpscr(CPUARMState *env, uint32_t val) 11381 { 11382 HELPER(vfp_set_fpscr)(env, val); 11383 } 11384 11385 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p)) 11386 11387 #define VFP_BINOP(name) \ 11388 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \ 11389 { \ 11390 float_status *fpst = fpstp; \ 11391 return float32_ ## name(a, b, fpst); \ 11392 } \ 11393 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \ 11394 { \ 11395 float_status *fpst = fpstp; \ 11396 return float64_ ## name(a, b, fpst); \ 11397 } 11398 VFP_BINOP(add) 11399 VFP_BINOP(sub) 11400 VFP_BINOP(mul) 11401 VFP_BINOP(div) 11402 VFP_BINOP(min) 11403 VFP_BINOP(max) 11404 VFP_BINOP(minnum) 11405 VFP_BINOP(maxnum) 11406 #undef VFP_BINOP 11407 11408 float32 VFP_HELPER(neg, s)(float32 a) 11409 { 11410 return float32_chs(a); 11411 } 11412 11413 float64 VFP_HELPER(neg, d)(float64 a) 11414 { 11415 return float64_chs(a); 11416 } 11417 11418 float32 VFP_HELPER(abs, s)(float32 a) 11419 { 11420 return float32_abs(a); 11421 } 11422 11423 float64 VFP_HELPER(abs, d)(float64 a) 11424 { 11425 return float64_abs(a); 11426 } 11427 11428 float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env) 11429 { 11430 return float32_sqrt(a, &env->vfp.fp_status); 11431 } 11432 11433 float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env) 11434 { 11435 return float64_sqrt(a, &env->vfp.fp_status); 11436 } 11437 11438 /* XXX: check quiet/signaling case */ 11439 #define DO_VFP_cmp(p, type) \ 11440 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \ 11441 { \ 11442 uint32_t flags; \ 11443 switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \ 11444 case 0: flags = 0x6; break; \ 11445 case -1: flags = 0x8; break; \ 11446 case 1: flags = 0x2; break; \ 11447 default: case 2: flags = 0x3; break; \ 11448 } \ 11449 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \ 11450 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ 11451 } \ 11452 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \ 11453 { \ 11454 uint32_t flags; \ 11455 switch(type ## _compare(a, b, &env->vfp.fp_status)) { \ 11456 case 0: flags = 0x6; break; \ 11457 case -1: flags = 0x8; break; \ 11458 case 1: flags = 0x2; break; \ 11459 default: case 2: flags = 0x3; break; \ 11460 } \ 11461 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \ 11462 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ 11463 } 11464 DO_VFP_cmp(s, float32) 11465 DO_VFP_cmp(d, float64) 11466 #undef DO_VFP_cmp 11467 11468 /* Integer to float and float to integer conversions */ 11469 11470 #define CONV_ITOF(name, ftype, fsz, sign) \ 11471 ftype HELPER(name)(uint32_t x, void *fpstp) \ 11472 { \ 11473 float_status *fpst = fpstp; \ 11474 return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \ 11475 } 11476 11477 #define CONV_FTOI(name, ftype, fsz, sign, round) \ 11478 sign##int32_t HELPER(name)(ftype x, void *fpstp) \ 11479 { \ 11480 float_status *fpst = fpstp; \ 11481 if (float##fsz##_is_any_nan(x)) { \ 11482 float_raise(float_flag_invalid, fpst); \ 11483 return 0; \ 11484 } \ 11485 return float##fsz##_to_##sign##int32##round(x, fpst); \ 11486 } 11487 11488 #define FLOAT_CONVS(name, p, ftype, fsz, sign) \ 11489 CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \ 11490 CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \ 11491 
CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero) 11492 11493 FLOAT_CONVS(si, h, uint32_t, 16, ) 11494 FLOAT_CONVS(si, s, float32, 32, ) 11495 FLOAT_CONVS(si, d, float64, 64, ) 11496 FLOAT_CONVS(ui, h, uint32_t, 16, u) 11497 FLOAT_CONVS(ui, s, float32, 32, u) 11498 FLOAT_CONVS(ui, d, float64, 64, u) 11499 11500 #undef CONV_ITOF 11501 #undef CONV_FTOI 11502 #undef FLOAT_CONVS 11503 11504 /* floating point conversion */ 11505 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env) 11506 { 11507 return float32_to_float64(x, &env->vfp.fp_status); 11508 } 11509 11510 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) 11511 { 11512 return float64_to_float32(x, &env->vfp.fp_status); 11513 } 11514 11515 /* VFP3 fixed point conversion. */ 11516 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11517 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ 11518 void *fpstp) \ 11519 { \ 11520 float_status *fpst = fpstp; \ 11521 float##fsz tmp; \ 11522 tmp = itype##_to_##float##fsz(x, fpst); \ 11523 return float##fsz##_scalbn(tmp, -(int)shift, fpst); \ 11524 } 11525 11526 /* Notice that we want only input-denormal exception flags from the 11527 * scalbn operation: the other possible flags (overflow+inexact if 11528 * we overflow to infinity, output-denormal) aren't correct for the 11529 * complete scale-and-convert operation. 11530 */ 11531 #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \ 11532 uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \ 11533 uint32_t shift, \ 11534 void *fpstp) \ 11535 { \ 11536 float_status *fpst = fpstp; \ 11537 int old_exc_flags = get_float_exception_flags(fpst); \ 11538 float##fsz tmp; \ 11539 if (float##fsz##_is_any_nan(x)) { \ 11540 float_raise(float_flag_invalid, fpst); \ 11541 return 0; \ 11542 } \ 11543 tmp = float##fsz##_scalbn(x, shift, fpst); \ 11544 old_exc_flags |= get_float_exception_flags(fpst) \ 11545 & float_flag_input_denormal; \ 11546 set_float_exception_flags(old_exc_flags, fpst); \ 11547 return float##fsz##_to_##itype##round(tmp, fpst); \ 11548 } 11549 11550 #define VFP_CONV_FIX(name, p, fsz, isz, itype) \ 11551 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11552 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \ 11553 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ) 11554 11555 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \ 11556 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11557 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ) 11558 11559 VFP_CONV_FIX(sh, d, 64, 64, int16) 11560 VFP_CONV_FIX(sl, d, 64, 64, int32) 11561 VFP_CONV_FIX_A64(sq, d, 64, 64, int64) 11562 VFP_CONV_FIX(uh, d, 64, 64, uint16) 11563 VFP_CONV_FIX(ul, d, 64, 64, uint32) 11564 VFP_CONV_FIX_A64(uq, d, 64, 64, uint64) 11565 VFP_CONV_FIX(sh, s, 32, 32, int16) 11566 VFP_CONV_FIX(sl, s, 32, 32, int32) 11567 VFP_CONV_FIX_A64(sq, s, 32, 64, int64) 11568 VFP_CONV_FIX(uh, s, 32, 32, uint16) 11569 VFP_CONV_FIX(ul, s, 32, 32, uint32) 11570 VFP_CONV_FIX_A64(uq, s, 32, 64, uint64) 11571 11572 #undef VFP_CONV_FIX 11573 #undef VFP_CONV_FIX_FLOAT 11574 #undef VFP_CONV_FLOAT_FIX_ROUND 11575 #undef VFP_CONV_FIX_A64 11576 11577 /* Conversion to/from f16 can overflow to infinity before/after scaling. 11578 * Therefore we convert to f64, scale, and then convert f64 to f16; or 11579 * vice versa for conversion to integer. 11580 * 11581 * For 16- and 32-bit integers, the conversion to f64 never rounds. 
11582 * For 64-bit integers, any integer that would cause rounding will also 11583 * overflow to f16 infinity, so there is no double rounding problem. 11584 */ 11585 11586 static float16 do_postscale_fp16(float64 f, int shift, float_status *fpst) 11587 { 11588 return float64_to_float16(float64_scalbn(f, -shift, fpst), true, fpst); 11589 } 11590 11591 uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst) 11592 { 11593 return do_postscale_fp16(int32_to_float64(x, fpst), shift, fpst); 11594 } 11595 11596 uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst) 11597 { 11598 return do_postscale_fp16(uint32_to_float64(x, fpst), shift, fpst); 11599 } 11600 11601 uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst) 11602 { 11603 return do_postscale_fp16(int64_to_float64(x, fpst), shift, fpst); 11604 } 11605 11606 uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst) 11607 { 11608 return do_postscale_fp16(uint64_to_float64(x, fpst), shift, fpst); 11609 } 11610 11611 static float64 do_prescale_fp16(float16 f, int shift, float_status *fpst) 11612 { 11613 if (unlikely(float16_is_any_nan(f))) { 11614 float_raise(float_flag_invalid, fpst); 11615 return 0; 11616 } else { 11617 int old_exc_flags = get_float_exception_flags(fpst); 11618 float64 ret; 11619 11620 ret = float16_to_float64(f, true, fpst); 11621 ret = float64_scalbn(ret, shift, fpst); 11622 old_exc_flags |= get_float_exception_flags(fpst) 11623 & float_flag_input_denormal; 11624 set_float_exception_flags(old_exc_flags, fpst); 11625 11626 return ret; 11627 } 11628 } 11629 11630 uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst) 11631 { 11632 return float64_to_int16(do_prescale_fp16(x, shift, fpst), fpst); 11633 } 11634 11635 uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst) 11636 { 11637 return float64_to_uint16(do_prescale_fp16(x, shift, fpst), fpst); 11638 } 11639 11640 uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst) 11641 { 11642 return float64_to_int32(do_prescale_fp16(x, shift, fpst), fpst); 11643 } 11644 11645 uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst) 11646 { 11647 return float64_to_uint32(do_prescale_fp16(x, shift, fpst), fpst); 11648 } 11649 11650 uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst) 11651 { 11652 return float64_to_int64(do_prescale_fp16(x, shift, fpst), fpst); 11653 } 11654 11655 uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst) 11656 { 11657 return float64_to_uint64(do_prescale_fp16(x, shift, fpst), fpst); 11658 } 11659 11660 /* Set the current fp rounding mode and return the old one. 11661 * The argument is a softfloat float_round_ value. 11662 */ 11663 uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp) 11664 { 11665 float_status *fp_status = fpstp; 11666 11667 uint32_t prev_rmode = get_float_rounding_mode(fp_status); 11668 set_float_rounding_mode(rmode, fp_status); 11669 11670 return prev_rmode; 11671 } 11672 11673 /* Set the current fp rounding mode in the standard fp status and return 11674 * the old one. This is for NEON instructions that need to change the 11675 * rounding mode but wish to use the standard FPSCR values for everything 11676 * else. Always set the rounding mode back to the correct value after 11677 * modifying it. 11678 * The argument is a softfloat float_round_ value. 
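 * Sketch of the intended save/modify/restore pattern (using
 * helper_set_neon_rmode, the C name the HELPER() macro generates):
 *
 *     uint32_t saved = helper_set_neon_rmode(float_round_up, env);
 *     ... perform the NEON operation ...
 *     helper_set_neon_rmode(saved, env);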
11679 */ 11680 uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env) 11681 { 11682 float_status *fp_status = &env->vfp.standard_fp_status; 11683 11684 uint32_t prev_rmode = get_float_rounding_mode(fp_status); 11685 set_float_rounding_mode(rmode, fp_status); 11686 11687 return prev_rmode; 11688 } 11689 11690 /* Half precision conversions. */ 11691 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode) 11692 { 11693 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11694 * it would affect flushing input denormals. 11695 */ 11696 float_status *fpst = fpstp; 11697 flag save = get_flush_inputs_to_zero(fpst); 11698 set_flush_inputs_to_zero(false, fpst); 11699 float32 r = float16_to_float32(a, !ahp_mode, fpst); 11700 set_flush_inputs_to_zero(save, fpst); 11701 return r; 11702 } 11703 11704 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode) 11705 { 11706 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11707 * it would affect flushing output denormals. 11708 */ 11709 float_status *fpst = fpstp; 11710 flag save = get_flush_to_zero(fpst); 11711 set_flush_to_zero(false, fpst); 11712 float16 r = float32_to_float16(a, !ahp_mode, fpst); 11713 set_flush_to_zero(save, fpst); 11714 return r; 11715 } 11716 11717 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode) 11718 { 11719 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11720 * it would affect flushing input denormals. 11721 */ 11722 float_status *fpst = fpstp; 11723 flag save = get_flush_inputs_to_zero(fpst); 11724 set_flush_inputs_to_zero(false, fpst); 11725 float64 r = float16_to_float64(a, !ahp_mode, fpst); 11726 set_flush_inputs_to_zero(save, fpst); 11727 return r; 11728 } 11729 11730 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode) 11731 { 11732 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11733 * it would affect flushing output denormals. 11734 */ 11735 float_status *fpst = fpstp; 11736 flag save = get_flush_to_zero(fpst); 11737 set_flush_to_zero(false, fpst); 11738 float16 r = float64_to_float16(a, !ahp_mode, fpst); 11739 set_flush_to_zero(save, fpst); 11740 return r; 11741 } 11742 11743 #define float32_two make_float32(0x40000000) 11744 #define float32_three make_float32(0x40400000) 11745 #define float32_one_point_five make_float32(0x3fc00000) 11746 11747 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env) 11748 { 11749 float_status *s = &env->vfp.standard_fp_status; 11750 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || 11751 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { 11752 if (!(float32_is_zero(a) || float32_is_zero(b))) { 11753 float_raise(float_flag_input_denormal, s); 11754 } 11755 return float32_two; 11756 } 11757 return float32_sub(float32_two, float32_mul(a, b, s), s); 11758 } 11759 11760 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env) 11761 { 11762 float_status *s = &env->vfp.standard_fp_status; 11763 float32 product; 11764 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || 11765 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { 11766 if (!(float32_is_zero(a) || float32_is_zero(b))) { 11767 float_raise(float_flag_input_denormal, s); 11768 } 11769 return float32_one_point_five; 11770 } 11771 product = float32_mul(a, b, s); 11772 return float32_div(float32_sub(float32_three, product, s), float32_two, s); 11773 } 11774 11775 /* NEON helpers. 
/* NEON helpers. */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.
 */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
#define float16_maxnorm make_float16(0x7bff)
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)

/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
 */

/* See RecipEstimate()
 *
 * input is a 9 bit fixed point number
 * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
 * result range 256 .. 511 for a number from 1.0 to 511/256.
 */
static int recip_estimate(int input)
{
    int a, b, r;
    assert(256 <= input && input < 512);
    a = (input * 2) + 1;
    b = (1 << 19) / a;
    r = (b + 1) >> 1;
    assert(256 <= r && r < 512);
    return r;
}

/*
 * Common wrapper to call recip_estimate
 *
 * The parameters are exponent and 64 bit fraction (without implicit
 * bit) where the binary point is nominally at bit 52. Returns the
 * fraction of the result, adjusting *exp in place; the caller then
 * packs it into the appropriate float format.
 */
static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
{
    uint32_t scaled, estimate;
    uint64_t result_frac;
    int result_exp;

    /* Handle sub-normals */
    if (*exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            *exp = -1;
            frac <<= 2;
        } else {
            frac <<= 1;
        }
    }

    /* scaled = UInt('1':fraction<51:44>) */
    scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    estimate = recip_estimate(scaled);

    result_exp = exp_off - *exp;
    result_frac = deposit64(0, 44, 8, estimate);
    if (result_exp == 0) {
        result_frac = deposit64(result_frac >> 1, 51, 1, 1);
    } else if (result_exp == -1) {
        result_frac = deposit64(result_frac >> 2, 50, 2, 1);
        result_exp = 0;
    }

    *exp = result_exp;

    return result_frac;
}

static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    }

    g_assert_not_reached();
}
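/* Illustrative sketch (compiled out; hypothetical guard): worked values of
 * the 9-bit table lookup. input/512 is the significand in [0.5, 1.0) and
 * result/256 approximates its reciprocal.
 */
#ifdef ARM_HELPER_EXAMPLES
static void example_recip_estimate(void)
{
    assert(recip_estimate(256) == 511); /* 1/0.5   ~= 511/256 = 1.996 */
    assert(recip_estimate(384) == 341); /* 1/0.75  ~= 341/256 = 1.332 */
    assert(recip_estimate(511) == 256); /* 1/0.998 ~= 256/256 = 1.000 */
}
#endif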
uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
    uint32_t f16_val = float16_val(f16);
    uint32_t f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(f16_val, 10, 5);
    uint32_t f16_frac = extract32(f16_val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_silence_nan(f16, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    } else if (float16_is_infinity(f16)) {
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, fpst);
        return float16_set_sign(float16_infinity, float16_is_neg(f16));
    } else if (float16_abs(f16) < (1 << 8)) {
        /* Abs(value) < 2.0^-16 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f16_sign)) {
            return float16_set_sign(float16_infinity, f16_sign);
        } else {
            return float16_set_sign(float16_maxnorm, f16_sign);
        }
    } else if (f16_exp >= 29 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    }

    f64_frac = call_recip_estimate(&f16_exp, 29,
                                   ((uint64_t) f16_frac) << (52 - 10));

    /* result = sign : result_exp<4:0> : fraction<51:42> */
    f16_val = deposit32(0, 15, 1, f16_sign);
    f16_val = deposit32(f16_val, 10, 5, f16_exp);
    f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
    return make_float16(f16_val);
}

float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    bool f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_abs(f32) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sign)) {
            return float32_set_sign(float32_infinity, f32_sign);
        } else {
            return float32_set_sign(float32_maxnorm, f32_sign);
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    f64_frac = call_recip_estimate(&f32_exp, 253,
                                   ((uint64_t) f32_frac) << (52 - 23));

    /* result = sign : result_exp<7:0> : fraction<51:29> */
    f32_val = deposit32(0, 31, 1, f32_sign);
    f32_val = deposit32(f32_val, 23, 8, f32_exp);
    f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
    return make_float32(f32_val);
}
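/* Illustrative sketch (compiled out; hypothetical guard): the overflow
 * path above in action. For |x| < 2^-128 the exact reciprocal is not
 * representable, so the result saturates to infinity or to the largest
 * normal depending on round_to_inf(); round-to-zero never rounds to
 * infinity, so it yields maxnorm.
 */
#ifdef ARM_HELPER_EXAMPLES
static void example_recpe_overflow(void)
{
    float_status fpst = { 0 };
    float32 r;

    set_float_rounding_mode(float_round_to_zero, &fpst);
    r = helper_recpe_f32(make_float32(0x00100000), &fpst); /* ~2^-129 */
    assert(float32_val(r) == float32_val(float32_maxnorm));
}
#endif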
float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(f64_val, 52, 11);
    uint64_t f64_frac = extract64(f64_val, 0, 52);

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sign)) {
            return float64_set_sign(float64_infinity, f64_sign);
        } else {
            return float64_set_sign(float64_maxnorm, f64_sign);
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);

    /* result = sign : result_exp<10:0> : fraction<51:0> */
    f64_val = deposit64(0, 63, 1, f64_sign);
    f64_val = deposit64(f64_val, 52, 11, f64_exp);
    f64_val = deposit64(f64_val, 0, 52, f64_frac);
    return make_float64(f64_val);
}

/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static int do_recip_sqrt_estimate(int a)
{
    int b, estimate;

    assert(128 <= a && a < 512);
    if (a < 256) {
        a = a * 2 + 1;
    } else {
        a = (a >> 1) << 1;
        a = (a + 1) * 2;
    }
    b = 512;
    while (a * (b + 1) * (b + 1) < (1 << 28)) {
        b += 1;
    }
    estimate = (b + 1) / 2;
    assert(256 <= estimate && estimate < 512);

    return estimate;
}

static uint64_t recip_sqrt_estimate(int *exp, int exp_off, uint64_t frac)
{
    int estimate;
    uint32_t scaled;

    if (*exp == 0) {
        while (extract64(frac, 51, 1) == 0) {
            frac = frac << 1;
            *exp -= 1;
        }
        frac = extract64(frac, 0, 51) << 1;
    }

    if (*exp & 1) {
        /* scaled = UInt('01':fraction<51:45>) */
        scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
    } else {
        /* scaled = UInt('1':fraction<51:44>) */
        scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    }
    estimate = do_recip_sqrt_estimate(scaled);

    *exp = (exp_off - *exp) / 2;
    return extract64(estimate, 0, 8) << 44;
}
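/* Illustrative sketch (compiled out; hypothetical guard): worked values of
 * the square-root table. a/512 is the significand in [0.25, 1.0) and
 * result/256 approximates 1/sqrt(a/512).
 */
#ifdef ARM_HELPER_EXAMPLES
static void example_recip_sqrt_estimate(void)
{
    assert(do_recip_sqrt_estimate(128) == 511); /* 1/sqrt(0.25)  ~= 1.996 */
    assert(do_recip_sqrt_estimate(256) == 361); /* 1/sqrt(0.5)   ~= 1.410 */
    assert(do_recip_sqrt_estimate(511) == 256); /* 1/sqrt(0.998) ~= 1.000 */
}
#endif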
uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
{
    float_status *s = fpstp;
    float16 f16 = float16_squash_input_denormal(input, s);
    uint16_t val = float16_val(f16);
    bool f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(val, 10, 5);
    uint16_t f16_frac = extract32(val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, s)) {
            float_raise(float_flag_invalid, s);
            nan = float16_silence_nan(f16, s);
        }
        if (s->default_nan_mode) {
            nan = float16_default_nan(s);
        }
        return nan;
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, s);
        return float16_set_sign(float16_infinity, f16_sign);
    } else if (f16_sign) {
        float_raise(float_flag_invalid, s);
        return float16_default_nan(s);
    } else if (float16_is_infinity(f16)) {
        return float16_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.
     */
    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}

float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.
     */
    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}

float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);

    /* result = sign : result_exp<10:0> : estimate<7:0> : Zeros(44) */
    /* The sign goes in IEEE bit 63; it is necessarily 0 at this point,
     * since negative inputs returned the default NaN above.
     */
    val = deposit64(0, 63, 1, f64_sign);
    val = deposit64(val, 52, 11, f64_exp);
    val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float64(val);
}
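/* Illustrative sketch (compiled out; hypothetical guard): pairing VRSQRTE
 * with VRSQRTS. Since rsqrts(a, b) computes (3 - a*b) / 2, one
 * Newton-Raphson step for 1/sqrt(a) is x' = x * rsqrts(a*x, x).
 */
#ifdef ARM_HELPER_EXAMPLES
static float32 example_neon_rsqrt(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 x = helper_rsqrte_f32(a, s); /* ~8-bit estimate of 1/sqrt(a) */

    /* One refinement step: x' = x * (3 - a*x*x) / 2 */
    x = float32_mul(x, helper_rsqrts_f32(float32_mul(a, x, s), x, env), s);
    return x;
}
#endif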
uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    /* fpstp is unused: the estimate is pure integer arithmetic */
    int input, estimate;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    input = extract32(a, 23, 9);
    estimate = recip_estimate(input);

    return deposit32(0, (32 - 9), 9, estimate);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
    int estimate;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));

    return deposit32(0, 23, 9, estimate);
}

/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}

/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for FPROUNDING_ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through: treat as round-to-nearest for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
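/* Illustrative sketch (compiled out; hypothetical guard): the usual
 * calling pattern for arm_rmode_to_sf() and the set_rmode helper, shown
 * here as an FRINTA-style round-ties-away-then-restore sequence.
 */
#ifdef ARM_HELPER_EXAMPLES
static float32 example_frinta(float32 x, float_status *fpst)
{
    uint32_t old = helper_set_rmode(arm_rmode_to_sf(FPROUNDING_TIEAWAY), fpst);
    float32 r = float32_round_to_int(x, fpst);

    helper_set_rmode(old, fpst); /* always restore the previous mode */
    return r;
}
#endif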
/* CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement. */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement. */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
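/* Illustrative sketch (compiled out; hypothetical guard): the caller's
 * side of the zero-extension contract. For a CRC32B-style operation only
 * one byte of the little-endian buffer participates, so the value must
 * already be zero-extended to 32 bits.
 */
#ifdef ARM_HELPER_EXAMPLES
static uint32_t example_crc32b(uint32_t acc, uint8_t byte)
{
    return helper_crc32(acc, byte, 1); /* upper 24 bits are already zero */
}
#endif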
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
static inline int fp_exception_el(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    int fpen;
    int cur_el = arm_current_el(env);

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}

void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
    int fp_el = fp_exception_el(env);
    uint32_t flags;

    if (is_a64(env)) {
        int sve_el = sve_exception_el(env);
        uint32_t zcr_len;

        *pc = env->pc;
        flags = ARM_TBFLAG_AARCH64_STATE_MASK;
        /* Get control bits for tagged addresses */
        flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
        flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
        flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT;

        /* If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            int current_el = arm_current_el(env);

            zcr_len = env->vfp.zcr_el[current_el <= 1 ? 1 : current_el];
            zcr_len &= 0xf;
            if (current_el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
            }
            if (current_el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
            }
        }
        flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT;
    } else {
        *pc = env->regs[15];
        flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
            | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
        if (!(access_secure_reg(env))) {
            flags |= ARM_TBFLAG_NS_MASK;
        }
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            flags |= ARM_TBFLAG_VFPEN_MASK;
        }
        flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                  << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }

    flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags |= ARM_TBFLAG_BE_DATA_MASK;
    }
    flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT;

    if (arm_v7m_is_handler_mode(env)) {
        flags |= ARM_TBFLAG_HANDLER_MASK;
    }

    *pflags = flags;
    *cs_base = 0;
}
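/* Illustrative sketch (compiled out; hypothetical guard): the effective
 * SVE vector length computed above, assuming EL2 and EL3 are both
 * implemented. The 4-bit LEN fields of ZCR_EL1..ZCR_EL3 act as successive
 * caps, so a higher exception level can only shrink, never grow, the
 * length visible below it.
 */
#ifdef ARM_HELPER_EXAMPLES
static uint32_t example_effective_zcr_len(uint32_t zcr_el1, uint32_t zcr_el2,
                                          uint32_t zcr_el3)
{
    uint32_t len = zcr_el1 & 0xf;

    len = MIN(len, zcr_el2 & 0xf);
    len = MIN(len, zcr_el3 & 0xf);
    return len;
}
#endif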