/*
 * ARM virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef ARM_CPU_H
#define ARM_CPU_H

#include "kvm-consts.h"
#include "qemu/cpu-float.h"
#include "hw/registerfields.h"
#include "cpu-qom.h"
#include "exec/cpu-defs.h"
#include "exec/gdbstub.h"
#include "qapi/qapi-types-common.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"

/* ARM processors have a weak memory model */
#define TCG_GUEST_DEFAULT_MO      (0)

#ifdef TARGET_AARCH64
#define KVM_HAVE_MCE_INJECTION 1
#endif

#define EXCP_UDEF            1   /* undefined instruction */
#define EXCP_SWI             2   /* software interrupt */
#define EXCP_PREFETCH_ABORT  3
#define EXCP_DATA_ABORT      4
#define EXCP_IRQ             5
#define EXCP_FIQ             6
#define EXCP_BKPT            7
#define EXCP_EXCEPTION_EXIT  8   /* Return from v7M exception.  */
#define EXCP_KERNEL_TRAP     9   /* Jumped to kernel code page.  */
#define EXCP_HVC            11   /* HyperVisor Call */
#define EXCP_HYP_TRAP       12
#define EXCP_SMC            13   /* Secure Monitor Call */
#define EXCP_VIRQ           14
#define EXCP_VFIQ           15
#define EXCP_SEMIHOST       16   /* semihosting call */
#define EXCP_NOCP           17   /* v7M NOCP UsageFault */
#define EXCP_INVSTATE       18   /* v7M INVSTATE UsageFault */
#define EXCP_STKOF          19   /* v8M STKOF UsageFault */
#define EXCP_LAZYFP         20   /* v7M fault during lazy FP stacking */
#define EXCP_LSERR          21   /* v8M LSERR SecureFault */
#define EXCP_UNALIGNED      22   /* v7M UNALIGNED UsageFault */
#define EXCP_DIVBYZERO      23   /* v7M DIVBYZERO UsageFault */
#define EXCP_VSERR          24
#define EXCP_GPC            25   /* v9 Granule Protection Check Fault */
#define EXCP_NMI            26
#define EXCP_VINMI          27
#define EXCP_VFNMI          28
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */

#define ARMV7M_EXCP_RESET   1
#define ARMV7M_EXCP_NMI     2
#define ARMV7M_EXCP_HARD    3
#define ARMV7M_EXCP_MEM     4
#define ARMV7M_EXCP_BUS     5
#define ARMV7M_EXCP_USAGE   6
#define ARMV7M_EXCP_SECURE  7
#define ARMV7M_EXCP_SVC     11
#define ARMV7M_EXCP_DEBUG   12
#define ARMV7M_EXCP_PENDSV  14
#define ARMV7M_EXCP_SYSTICK 15

/* ARM-specific interrupt pending bits.  */
#define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_VSERR CPU_INTERRUPT_TGT_INT_0
#define CPU_INTERRUPT_NMI   CPU_INTERRUPT_TGT_EXT_4
#define CPU_INTERRUPT_VINMI CPU_INTERRUPT_TGT_EXT_0
#define CPU_INTERRUPT_VFNMI CPU_INTERRUPT_TGT_INT_1
/* The usual mapping for an AArch64 system register to its AArch32
 * counterpart is for the 32 bit world to have access to the lower
 * half only (with writes leaving the upper half untouched). It's
 * therefore useful to be able to pass TCG the offset of the least
 * significant half of a uint64_t struct member.
 */
#if HOST_BIG_ENDIAN
#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#define offsetofhigh32(S, M) offsetof(S, M)
#else
#define offsetoflow32(S, M) offsetof(S, M)
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif

/* ARM-specific extra insn start words:
 * 1: Conditional execution bits
 * 2: Partial exception syndrome for data aborts
 */
#define TARGET_INSN_START_EXTRA_WORDS 2

/* The 2nd extra word holding syndrome info for data aborts does not use
 * the upper 6 bits nor the lower 13 bits. We mask and shift it down to
 * help the sleb128 encoder do a better job.
 * When restoring the CPU state, we shift it back up.
 */
#define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1)
#define ARM_INSN_START_WORD2_SHIFT 13

/* We currently assume float and double are IEEE single and double
   precision respectively.
   Doing runtime conversions is tricky because VFP registers may contain
   integer values (e.g. as the result of an FTOSI instruction).
   s<2n> maps to the least significant half of d<n>
   s<2n+1> maps to the most significant half of d<n>
 */

/**
 * DynamicGDBFeatureInfo:
 * @desc: Contains the feature descriptions.
 * @data: A union with data specific to the set of registers
 *    @cpregs_keys: Array that contains the corresponding key for each
 *                  cpreg, in the same order as the cpregs appear in the
 *                  XML description.
 */
typedef struct DynamicGDBFeatureInfo {
    GDBFeature desc;
    union {
        struct {
            uint32_t *keys;
        } cpregs;
    } data;
} DynamicGDBFeatureInfo;

/* CPU state for each instance of a generic timer (in cp15 c14) */
typedef struct ARMGenericTimer {
    uint64_t cval; /* Timer CompareValue register */
    uint64_t ctl; /* Timer Control register */
} ARMGenericTimer;

/* Define a maximum sized vector register.
 * For 32-bit, this is a 128-bit NEON/AdvSIMD register.
 * For 64-bit, this is a 2048-bit SVE register.
 *
 * Note that the mapping between S, D, and Q views of the register bank
 * differs between AArch64 and AArch32.
 * In AArch32:
 *  Qn = regs[n].d[1]:regs[n].d[0]
 *  Dn = regs[n / 2].d[n & 1]
 *  Sn = regs[n / 4].d[n % 4 / 2],
 *       bits 31..0 for even n, and bits 63..32 for odd n
 *       (and regs[16] to regs[31] are inaccessible)
 * In AArch64:
 *  Zn = regs[n].d[*]
 *  Qn = regs[n].d[1]:regs[n].d[0]
 *  Dn = regs[n].d[0]
 *  Sn = regs[n].d[0] bits 31..0
 *  Hn = regs[n].d[0] bits 15..0
 *
 * This corresponds to the architecturally defined mapping between
 * the two execution states, and means we do not need to explicitly
 * map these registers when changing states.
 *
 * Align the data for use with TCG host vector operations.
 */

#ifdef TARGET_AARCH64
# define ARM_MAX_VQ    16
#else
# define ARM_MAX_VQ    1
#endif

typedef struct ARMVectorReg {
    uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
} ARMVectorReg;
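/*
 * As a worked instance of the AArch32 mapping described above (plain
 * arithmetic on the formulas, no further assumptions):
 *   Q1 = regs[1].d[1]:regs[1].d[0]
 *   D2 = regs[2 / 2].d[2 & 1] = regs[1].d[0]
 *   D3 = regs[3 / 2].d[3 & 1] = regs[1].d[1]
 *   S5 = regs[5 / 4].d[5 % 4 / 2] = regs[1].d[0], bits 63..32 (odd n)
 * i.e. S4/S5 are the two halves of D2, and D2:D3 together form Q1,
 * matching the architectural overlap of the AArch32 register banks.
 */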
#ifdef TARGET_AARCH64
/* In AArch32 mode, predicate registers do not exist at all.  */
typedef struct ARMPredicateReg {
    uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
} ARMPredicateReg;

/* In AArch32 mode, PAC keys do not exist at all.  */
typedef struct ARMPACKey {
    uint64_t lo, hi;
} ARMPACKey;
#endif

/* See the commentary above the TBFLAG field definitions.  */
typedef struct CPUARMTBFlags {
    uint32_t flags;
    target_ulong flags2;
} CPUARMTBFlags;

typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;

typedef struct NVICState NVICState;

typedef struct CPUArchState {
    /* Regs for current mode.  */
    uint32_t regs[16];

    /* 32/64 switch only happens when taking and returning from
     * exceptions so the overlap semantics are taken care of then
     * instead of having a complicated union.
     */
    /* Regs for A64 mode.  */
    uint64_t xregs[32];
    uint64_t pc;
    /* PSTATE isn't an architectural register for ARMv8. However, it is
     * convenient for us to assemble the underlying state into a 32 bit format
     * identical to the architectural format used for the SPSR. (This is also
     * what the Linux kernel's 'pstate' field in signal handlers and KVM's
     * 'pstate' register are.)  Of the PSTATE bits:
     *  NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same
     *    semantics as for AArch32, as described in the comments on each field)
     *  nRW (also known as M[4]) is kept, inverted, in env->aarch64
     *  DAIF (exception masks) are kept in env->daif
     *  BTYPE is kept in env->btype
     *  SM and ZA are kept in env->svcr
     *  all other bits are stored in their correct places in env->pstate
     */
    uint32_t pstate;
    bool aarch64; /* True if CPU is in aarch64 state; inverse of PSTATE.nRW */
    bool thumb;   /* True if CPU is in thumb mode; cpsr[5] */

    /* Cached TBFLAGS state.  See below for which bits are included.  */
    CPUARMTBFlags hflags;

    /* Frequently accessed CPSR bits are stored separately for efficiency.
       This contains all the other bits.  Use cpsr_{read,write} to access
       the whole CPSR.  */
    uint32_t uncached_cpsr;
    uint32_t spsr;

    /* Banked registers.  */
    uint64_t banked_spsr[8];
    uint32_t banked_r13[8];
    uint32_t banked_r14[8];

    /* These hold r8-r12.  */
    uint32_t usr_regs[5];
    uint32_t fiq_regs[5];

    /* cpsr flag cache for faster execution */
    uint32_t CF; /* 0 or 1 */
    uint32_t VF; /* V is bit 31. All other bits are undefined.  */
    uint32_t NF; /* N is bit 31. All other bits are undefined.  */
    uint32_t ZF; /* Z set if zero.  */
    uint32_t QF; /* 0 or 1 */
    uint32_t GE; /* cpsr[19:16] */
    uint32_t condexec_bits; /* IT bits.  cpsr[15:10,26:25].  */
    uint32_t btype;  /* BTI branch type.  spsr[11:10].  */
    uint64_t daif; /* exception masks, in the bits they are in PSTATE */
    uint64_t svcr; /* PSTATE.{SM,ZA} in the bits they are in SVCR */

    uint64_t elr_el[4]; /* AArch64 exception link regs  */
    uint64_t sp_el[4]; /* AArch64 banked stack pointers */

    /* System control coprocessor (cp15) */
    struct {
        uint32_t c0_cpuid;
        union { /* Cache size selection */
            struct {
                uint64_t _unused_csselr0;
                uint64_t csselr_ns;
                uint64_t _unused_csselr1;
                uint64_t csselr_s;
            };
            uint64_t csselr_el[4];
        };
        union { /* System control register. */
            struct {
                uint64_t _unused_sctlr;
                uint64_t sctlr_ns;
                uint64_t hsctlr;
                uint64_t sctlr_s;
            };
            uint64_t sctlr_el[4];
        };
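        /*
         * Given the overlay above, the AArch32 banked views and the
         * AArch64 sctlr_el[] array alias as:
         *   sctlr_el[1] == sctlr_ns  (Non-secure view)
         *   sctlr_el[2] == hsctlr    (Hyp view)
         *   sctlr_el[3] == sctlr_s   (Secure view)
         * so AArch64 code can index by EL while AArch32 code uses the
         * banked names, with no explicit conversion needed. The other
         * ns/s unions in this struct follow the same pattern.
         */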
        uint64_t vsctlr; /* Virtualization System control register. */
        uint64_t cpacr_el1; /* Architectural feature access control register */
        uint64_t cptr_el[4];  /* ARMv8 feature trap registers */
        uint32_t c1_xscaleauxcr; /* XScale auxiliary control register.  */
        uint64_t sder; /* Secure debug enable register. */
        uint32_t nsacr; /* Non-secure access control register. */
        union { /* MMU translation table base 0. */
            struct {
                uint64_t _unused_ttbr0_0;
                uint64_t ttbr0_ns;
                uint64_t _unused_ttbr0_1;
                uint64_t ttbr0_s;
            };
            uint64_t ttbr0_el[4];
        };
        union { /* MMU translation table base 1. */
            struct {
                uint64_t _unused_ttbr1_0;
                uint64_t ttbr1_ns;
                uint64_t _unused_ttbr1_1;
                uint64_t ttbr1_s;
            };
            uint64_t ttbr1_el[4];
        };
        uint64_t vttbr_el2; /* Virtualization Translation Table Base.  */
        uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */
        /* MMU translation table base control. */
        uint64_t tcr_el[4];
        uint64_t vtcr_el2; /* Virtualization Translation Control.  */
        uint64_t vstcr_el2; /* Secure Virtualization Translation Control. */
        uint32_t c2_data; /* MPU data cacheable bits.  */
        uint32_t c2_insn; /* MPU instruction cacheable bits.  */
        union { /* MMU domain access control register
                 * MPU write buffer control.
                 */
            struct {
                uint64_t dacr_ns;
                uint64_t dacr_s;
            };
            struct {
                uint64_t dacr32_el2;
            };
        };
        uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
        uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
        uint64_t hcr_el2; /* Hypervisor configuration register */
        uint64_t hcrx_el2; /* Extended Hypervisor configuration register */
        uint64_t scr_el3; /* Secure configuration register.  */
        union { /* Fault status registers.  */
            struct {
                uint64_t ifsr_ns;
                uint64_t ifsr_s;
            };
            struct {
                uint64_t ifsr32_el2;
            };
        };
        union {
            struct {
                uint64_t _unused_dfsr;
                uint64_t dfsr_ns;
                uint64_t hsr;
                uint64_t dfsr_s;
            };
            uint64_t esr_el[4];
        };
        uint32_t c6_region[8]; /* MPU base/size registers.  */
        union { /* Fault address registers. */
            struct {
                uint64_t _unused_far0;
#if HOST_BIG_ENDIAN
                uint32_t ifar_ns;
                uint32_t dfar_ns;
                uint32_t ifar_s;
                uint32_t dfar_s;
#else
                uint32_t dfar_ns;
                uint32_t ifar_ns;
                uint32_t dfar_s;
                uint32_t ifar_s;
#endif
                uint64_t _unused_far3;
            };
            uint64_t far_el[4];
        };
        uint64_t hpfar_el2;
        uint64_t hstr_el2;
        union { /* Translation result. */
            struct {
                uint64_t _unused_par_0;
                uint64_t par_ns;
                uint64_t _unused_par_1;
                uint64_t par_s;
            };
            uint64_t par_el[4];
        };
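        /*
         * The HOST_BIG_ENDIAN switch in the fault address union above
         * keeps the 32-bit banked views over the architecturally correct
         * half of the 64-bit registers on either host. For example, on
         * both endiannesses:
         *   offsetoflow32(CPUARMState, cp15.far_el[1])
         *     == offsetof(CPUARMState, cp15.dfar_ns)
         * i.e. the AArch32 DFAR is the low 32 bits of FAR_EL1, which is
         * what lets TCG address either view of the same storage.
         */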
        uint32_t c9_insn; /* Cache lockdown registers.  */
        uint32_t c9_data;
        uint64_t c9_pmcr; /* performance monitor control register */
        uint64_t c9_pmcnten; /* perf monitor counter enables */
        uint64_t c9_pmovsr; /* perf monitor overflow status */
        uint64_t c9_pmuserenr; /* perf monitor user enable */
        uint64_t c9_pmselr; /* perf monitor counter selection register */
        uint64_t c9_pminten; /* perf monitor interrupt enables */
        union { /* Memory attribute redirection */
            struct {
#if HOST_BIG_ENDIAN
                uint64_t _unused_mair_0;
                uint32_t mair1_ns;
                uint32_t mair0_ns;
                uint64_t _unused_mair_1;
                uint32_t mair1_s;
                uint32_t mair0_s;
#else
                uint64_t _unused_mair_0;
                uint32_t mair0_ns;
                uint32_t mair1_ns;
                uint64_t _unused_mair_1;
                uint32_t mair0_s;
                uint32_t mair1_s;
#endif
            };
            uint64_t mair_el[4];
        };
        union { /* vector base address register */
            struct {
                uint64_t _unused_vbar;
                uint64_t vbar_ns;
                uint64_t hvbar;
                uint64_t vbar_s;
            };
            uint64_t vbar_el[4];
        };
        uint32_t mvbar; /* (monitor) vector base address register */
        uint64_t rvbar; /* rvbar sampled from rvbar property at reset */
        struct { /* FCSE PID. */
            uint32_t fcseidr_ns;
            uint32_t fcseidr_s;
        };
        union { /* Context ID. */
            struct {
                uint64_t _unused_contextidr_0;
                uint64_t contextidr_ns;
                uint64_t _unused_contextidr_1;
                uint64_t contextidr_s;
            };
            uint64_t contextidr_el[4];
        };
        union { /* User RW Thread register. */
            struct {
                uint64_t tpidrurw_ns;
                uint64_t tpidrprw_ns;
                uint64_t htpidr;
                uint64_t _tpidr_el3;
            };
            uint64_t tpidr_el[4];
        };
        uint64_t tpidr2_el0;
        /* The secure banks of these registers don't map anywhere */
        uint64_t tpidrurw_s;
        uint64_t tpidrprw_s;
        uint64_t tpidruro_s;

        union { /* User RO Thread register. */
            uint64_t tpidruro_ns;
            uint64_t tpidrro_el[1];
        };
        uint64_t c14_cntfrq; /* Counter Frequency register */
        uint64_t c14_cntkctl; /* Timer Control register */
        uint64_t cnthctl_el2; /* Counter/Timer Hyp Control register */
        uint64_t cntvoff_el2; /* Counter Virtual Offset register */
        uint64_t cntpoff_el2; /* Counter Physical Offset register */
        ARMGenericTimer c14_timer[NUM_GTIMERS];
        uint32_t c15_cpar; /* XScale Coprocessor Access Register */
        uint32_t c15_ticonfig; /* TI925T configuration byte.  */
        uint32_t c15_i_max; /* Maximum D-cache dirty line index.  */
        uint32_t c15_i_min; /* Minimum D-cache dirty line index.  */
        uint32_t c15_threadid; /* TI debugger thread-ID.  */
        uint32_t c15_config_base_address; /* SCU base address.  */
        uint32_t c15_diagnostic; /* diagnostic register */
        uint32_t c15_power_diagnostic;
        uint32_t c15_power_control; /* power control */
        uint64_t dbgbvr[16]; /* breakpoint value registers */
        uint64_t dbgbcr[16]; /* breakpoint control registers */
        uint64_t dbgwvr[16]; /* watchpoint value registers */
        uint64_t dbgwcr[16]; /* watchpoint control registers */
        uint64_t dbgclaim;   /* DBGCLAIM bits */
        uint64_t mdscr_el1;
        uint64_t oslsr_el1; /* OS Lock Status */
        uint64_t osdlr_el1; /* OS DoubleLock status */
        uint64_t mdcr_el2;
        uint64_t mdcr_el3;
        /* Stores the architectural value of the counter *the last time it was
         * updated* by pmccntr_op_start. Accesses should always be surrounded
         * by pmccntr_op_start/pmccntr_op_finish to guarantee the latest
         * architecturally-correct value is being read/set.
         */
        uint64_t c15_ccnt;
        /* Stores the delta between the architectural value and the underlying
         * cycle count during normal operation. It is used to update c15_ccnt
         * to be the correct architectural value before accesses. During
         * accesses, c15_ccnt_delta contains the underlying count being used
         * for the access, after which it reverts to the delta value in
         * pmccntr_op_finish.
         */
        uint64_t c15_ccnt_delta;
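        /*
         * Schematically (the exact sign convention is owned by
         * pmccntr_op_start/pmccntr_op_finish in helper.c), while the
         * counter is enabled the guest-visible PMCCNTR is reconstructed
         * on demand as roughly:
         *   PMCCNTR = underlying_cycle_count - c15_ccnt_delta
         * so starting, stopping, or writing the counter only has to
         * adjust the delta rather than track every elapsed cycle.
         */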
        uint64_t c14_pmevcntr[31];
        uint64_t c14_pmevcntr_delta[31];
        uint64_t c14_pmevtyper[31];
        uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
        uint64_t vpidr_el2; /* Virtualization Processor ID Register */
        uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
        uint64_t tfsr_el[4]; /* tfsre0_el1 is index 0.  */
        uint64_t gcr_el1;
        uint64_t rgsr_el1;

        /* Minimal RAS registers */
        uint64_t disr_el1;
        uint64_t vdisr_el2;
        uint64_t vsesr_el2;

        /*
         * Fine-Grained Trap registers. We store these as arrays so the
         * access checking code doesn't have to manually select
         * HFGRTR_EL2 vs HDFGRTR_EL2 etc when looking up the bit to test.
         * FEAT_FGT2 will add more elements to these arrays.
         */
        uint64_t fgt_read[2]; /* HFGRTR, HDFGRTR */
        uint64_t fgt_write[2]; /* HFGWTR, HDFGWTR */
        uint64_t fgt_exec[1]; /* HFGITR */

        /* RME registers */
        uint64_t gpccr_el3;
        uint64_t gptbr_el3;
        uint64_t mfar_el3;

        /* NV2 register */
        uint64_t vncr_el2;
    } cp15;

    struct {
        /* M profile has up to 4 stack pointers:
         * a Main Stack Pointer and a Process Stack Pointer for each
         * of the Secure and Non-Secure states. (If the CPU doesn't support
         * the security extension then it has only two SPs.)
         * In QEMU we always store the currently active SP in regs[13],
         * and the non-active SP for the current security state in
         * v7m.other_sp. The stack pointers for the inactive security state
         * are stored in other_ss_msp and other_ss_psp.
         * switch_v7m_security_state() is responsible for rearranging them
         * when we change security state.
         */
        uint32_t other_sp;
        uint32_t other_ss_msp;
        uint32_t other_ss_psp;
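        /*
         * As a concrete instance of the scheme above: if the CPU is
         * currently Secure and using the Main stack, then
         *   regs[13]     = Secure MSP (active)
         *   other_sp     = Secure PSP (inactive SP of the current state)
         *   other_ss_msp = Non-secure MSP
         *   other_ss_psp = Non-secure PSP
         * and switch_v7m_security_state() shuffles these four slots
         * whenever the security state changes.
         */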
        uint32_t vecbase[M_REG_NUM_BANKS];
        uint32_t basepri[M_REG_NUM_BANKS];
        uint32_t control[M_REG_NUM_BANKS];
        uint32_t ccr[M_REG_NUM_BANKS]; /* Configuration and Control */
        uint32_t cfsr[M_REG_NUM_BANKS]; /* Configurable Fault Status */
        uint32_t hfsr; /* HardFault Status */
        uint32_t dfsr; /* Debug Fault Status Register */
        uint32_t sfsr; /* Secure Fault Status Register */
        uint32_t mmfar[M_REG_NUM_BANKS]; /* MemManage Fault Address */
        uint32_t bfar; /* BusFault Address */
        uint32_t sfar; /* Secure Fault Address Register */
        unsigned mpu_ctrl[M_REG_NUM_BANKS]; /* MPU_CTRL */
        int exception;
        uint32_t primask[M_REG_NUM_BANKS];
        uint32_t faultmask[M_REG_NUM_BANKS];
        uint32_t aircr; /* only holds r/w state if security extn implemented */
        uint32_t secure; /* Is CPU in Secure state? (not guest visible) */
        uint32_t csselr[M_REG_NUM_BANKS];
        uint32_t scr[M_REG_NUM_BANKS];
        uint32_t msplim[M_REG_NUM_BANKS];
        uint32_t psplim[M_REG_NUM_BANKS];
        uint32_t fpcar[M_REG_NUM_BANKS];
        uint32_t fpccr[M_REG_NUM_BANKS];
        uint32_t fpdscr[M_REG_NUM_BANKS];
        uint32_t cpacr[M_REG_NUM_BANKS];
        uint32_t nsacr;
        uint32_t ltpsize;
        uint32_t vpr;
    } v7m;

    /* Information associated with an exception about to be taken:
     * code which raises an exception must set cs->exception_index and
     * the relevant parts of this structure; the cpu_do_interrupt function
     * will then set the guest-visible registers as part of the exception
     * entry process.
     */
    struct {
        uint32_t syndrome; /* AArch64 format syndrome register */
        uint32_t fsr; /* AArch32 format fault status register info */
        uint64_t vaddress; /* virtual addr associated with exception, if any */
        uint32_t target_el; /* EL the exception should be targeted for */
        /* If we implement EL2 we will also need to store information
         * about the intermediate physical address for stage 2 faults.
         */
    } exception;

    /* Information associated with an SError */
    struct {
        uint8_t pending;
        uint8_t has_esr;
        uint64_t esr;
    } serror;

    uint8_t ext_dabt_raised; /* Tracking/verifying injection of ext DABT */

    /* State of our input IRQ/FIQ/VIRQ/VFIQ lines */
    uint32_t irq_line_state;

    /* Thumb-2 EE state.  */
    uint32_t teecr;
    uint32_t teehbr;

    /* VFP coprocessor state.  */
    struct {
        ARMVectorReg zregs[32];

#ifdef TARGET_AARCH64
        /* Store FFR as pregs[16] to make it easier to treat as any other.  */
#define FFR_PRED_NUM 16
        ARMPredicateReg pregs[17];
        /* Scratch space for aa64 sve predicate temporary.  */
        ARMPredicateReg preg_tmp;
#endif

        /* We store these FPSCR fields separately for convenience.  */
        uint32_t qc[4] QEMU_ALIGNED(16);
        int vec_len;
        int vec_stride;

        uint32_t xregs[16];

        /* Scratch space for aa32 neon expansion.  */
        uint32_t scratch[8];
        /* There are a number of distinct float control structures:
         *
         *  fp_status: is the "normal" fp status.
         *  fp_status_f16: used for half-precision calculations
         *  standard_fp_status: the ARM "Standard FPSCR Value"
         *  standard_fp_status_f16: used for half-precision
         *       calculations with the ARM "Standard FPSCR Value"
         *
         * Half-precision operations are governed by a separate
         * flush-to-zero control bit in FPSCR:FZ16. We pass a separate
         * status structure to control this.
         *
         * The "Standard FPSCR", ie default-NaN, flush-to-zero,
         * round-to-nearest, is used by any operations (generally
         * Neon) which the architecture defines as controlled by the
         * standard FPSCR value rather than the FPSCR.
         *
         * The "standard FPSCR but for fp16 ops" is needed because
         * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than
         * using a fixed value for it.
         *
         * To avoid having to transfer exception bits around, we simply
         * say that the FPSCR cumulative exception flags are the logical
         * OR of the flags in the four fp statuses. This relies on the
         * only thing which needs to read the exception flags being
         * an explicit FPSCR read.
         */
        float_status fp_status;
        float_status fp_status_f16;
        float_status standard_fp_status;
        float_status standard_fp_status_f16;

        uint64_t zcr_el[4];   /* ZCR_EL[1-3] */
        uint64_t smcr_el[4];  /* SMCR_EL[1-3] */
    } vfp;

    uint64_t exclusive_addr;
    uint64_t exclusive_val;
    /*
     * Contains the 'val' for the second 64-bit register of LDXP, which comes
     * from the higher address, not the high part of a complete 128-bit value.
     * In some ways it might be more convenient to record the exclusive value
     * as the low and high halves of a 128 bit data value, but the current
     * semantics of these fields are baked into the migration format.
     */
    uint64_t exclusive_high;

    /* iwMMXt coprocessor state.  */
    struct {
        uint64_t regs[16];
        uint64_t val;

        uint32_t cregs[16];
    } iwmmxt;

#ifdef TARGET_AARCH64
    struct {
        ARMPACKey apia;
        ARMPACKey apib;
        ARMPACKey apda;
        ARMPACKey apdb;
        ARMPACKey apga;
    } keys;

    uint64_t scxtnum_el[4];

    /*
     * SME ZA storage -- 256 x 256 byte array, with bytes in host word order,
     * as we do with vfp.zregs[].  This corresponds to the architectural ZA
     * array, where ZA[N] is in the least-significant bytes of env->zarray[N].
     * When SVL is less than the architectural maximum, the accessible
     * storage is restricted, such that if the SVL is X bytes the guest can
     * see only the bottom X elements of zarray[], and only the least
     * significant X bytes of each element of the array. (In other words,
     * the observable part is always square.)
     *
     * The ZA storage can also be considered as a set of square tiles of
     * elements of different sizes. The mapping from tiles to the ZA array
     * is architecturally defined, such that for tiles of elements of esz
     * bytes, the Nth row (or "horizontal slice") of tile T is in
     * ZA[T + N * esz]. Note that this means that each tile is not contiguous
     * in the ZA storage, because its rows are striped through the ZA array.
     *
     * Because this is so large, keep this toward the end of the reset area,
     * to keep the offsets into the rest of the structure smaller.
     */
    ARMVectorReg zarray[ARM_MAX_VQ * 16];
#endif
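    /*
     * A worked example of the tile striping described above: for 64-bit
     * elements (esz == 8) there are eight tiles ZA0.D..ZA7.D, and row N
     * of tile T lives in zarray[T + N * 8]; so row 2 of ZA3.D is
     * zarray[19], while rows of the byte tile ZA0.B (esz == 1) occupy
     * consecutive zarray[] entries.
     */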
    struct CPUBreakpoint *cpu_breakpoint[16];
    struct CPUWatchpoint *cpu_watchpoint[16];

    /* Optional fault info across tlb lookup. */
    ARMMMUFaultInfo *tlb_fi;

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;

    /* Fields after this point are preserved across CPU reset. */

    /* Internal CPU feature flags.  */
    uint64_t features;

    /* PMSAv7 MPU */
    struct {
        uint32_t *drbar;
        uint32_t *drsr;
        uint32_t *dracr;
        uint32_t rnr[M_REG_NUM_BANKS];
    } pmsav7;

    /* PMSAv8 MPU */
    struct {
        /* The PMSAv8 implementation also shares some PMSAv7 config
         * and state:
         *  pmsav7.rnr (region number register)
         *  pmsav7_dregion (number of configured regions)
         */
        uint32_t *rbar[M_REG_NUM_BANKS];
        uint32_t *rlar[M_REG_NUM_BANKS];
        uint32_t *hprbar;
        uint32_t *hprlar;
        uint32_t mair0[M_REG_NUM_BANKS];
        uint32_t mair1[M_REG_NUM_BANKS];
        uint32_t hprselr;
    } pmsav8;

    /* v8M SAU */
    struct {
        uint32_t *rbar;
        uint32_t *rlar;
        uint32_t rnr;
        uint32_t ctrl;
    } sau;

#if !defined(CONFIG_USER_ONLY)
    NVICState *nvic;
    const struct arm_boot_info *boot_info;
    /* Store GICv3CPUState to access from this struct */
    void *gicv3state;
#else /* CONFIG_USER_ONLY */
    /* For usermode syscall translation.  */
    bool eabi;
#endif /* CONFIG_USER_ONLY */

#ifdef TARGET_TAGGED_ADDRESSES
    /* Linux syscall tagged address support */
    bool tagged_addr_enable;
#endif
} CPUARMState;

static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1ULL << feature;
}

static inline void unset_feature(CPUARMState *env, int feature)
{
    env->features &= ~(1ULL << feature);
}

/**
 * ARMELChangeHookFn:
 * type of a function which can be registered via arm_register_el_change_hook()
 * to get callbacks when the CPU changes its exception level or mode.
 */
typedef void ARMELChangeHookFn(ARMCPU *cpu, void *opaque);
typedef struct ARMELChangeHook ARMELChangeHook;
struct ARMELChangeHook {
    ARMELChangeHookFn *hook;
    void *opaque;
    QLIST_ENTRY(ARMELChangeHook) node;
};

/* These values map onto the return values for
 * QEMU_PSCI_0_2_FN_AFFINITY_INFO */
typedef enum ARMPSCIState {
    PSCI_ON = 0,
    PSCI_OFF = 1,
    PSCI_ON_PENDING = 2
} ARMPSCIState;

typedef struct ARMISARegisters ARMISARegisters;

/*
 * In map, each set bit is a supported vector length of (bit-number + 1) * 16
 * bytes, i.e. each bit number + 1 is the vector length in quadwords.
 *
 * While processing properties during initialization, corresponding init bits
 * are set for bits in sve_vq_map that have been set by properties.
 *
 * Bits set in supported represent valid vector lengths for the CPU type.
 */
typedef struct {
    uint32_t map, init, supported;
} ARMVQMap;
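/*
 * For example, map == 0b0101 declares vector lengths of 16 bytes (bit 0,
 * VQ 1) and 48 bytes (bit 2, VQ 3) as supported: bit n set means
 * (n + 1) quadwords, i.e. (n + 1) * 16 bytes.
 */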
/**
 * ARMCPU:
 * @env: #CPUARMState
 *
 * An ARM CPU core.
 */
struct ArchCPU {
    CPUState parent_obj;

    CPUARMState env;

    /* Coprocessor information */
    GHashTable *cp_regs;
    /* For marshalling (mostly coprocessor) register state between the
     * kernel and QEMU (for KVM) and between two QEMUs (for migration),
     * we use these arrays.
     */
    /* List of register indexes managed via these arrays; (full KVM style
     * 64 bit indexes, not CPRegInfo 32 bit indexes)
     */
    uint64_t *cpreg_indexes;
    /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */
    uint64_t *cpreg_values;
    /* Length of the indexes, values, reset_values arrays */
    int32_t cpreg_array_len;
    /* These are used only for migration: incoming data arrives in
     * these fields and is sanity checked in post_load before copying
     * to the working data structures above.
     */
    uint64_t *cpreg_vmstate_indexes;
    uint64_t *cpreg_vmstate_values;
    int32_t cpreg_vmstate_array_len;

    DynamicGDBFeatureInfo dyn_sysreg_feature;
    DynamicGDBFeatureInfo dyn_svereg_feature;
    DynamicGDBFeatureInfo dyn_m_systemreg_feature;
    DynamicGDBFeatureInfo dyn_m_secextreg_feature;

    /* Timers used by the generic (architected) timer */
    QEMUTimer *gt_timer[NUM_GTIMERS];
    /*
     * Timer used by the PMU. Its state is restored after migration by
     * pmu_op_finish() - it does not need other handling during migration
     */
    QEMUTimer *pmu_timer;
    /* GPIO outputs for generic timer */
    qemu_irq gt_timer_outputs[NUM_GTIMERS];
    /* GPIO output for GICv3 maintenance interrupt signal */
    qemu_irq gicv3_maintenance_interrupt;
    /* GPIO output for the PMU interrupt */
    qemu_irq pmu_interrupt;

    /* MemoryRegion to use for secure physical accesses */
    MemoryRegion *secure_memory;

    /* MemoryRegion to use for allocation tag accesses */
    MemoryRegion *tag_memory;
    MemoryRegion *secure_tag_memory;

    /* For v8M, pointer to the IDAU interface provided by board/SoC */
    Object *idau;

    /* 'compatible' string for this CPU for Linux device trees */
    const char *dtb_compatible;

    /* PSCI version for this CPU
     * Bits[31:16] = Major Version
     * Bits[15:0] = Minor Version
     */
    uint32_t psci_version;
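    /*
     * For example, PSCI v1.1 is encoded here as (1 << 16) | 1 == 0x00010001.
     */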
    /* Current power state, access guarded by BQL */
    ARMPSCIState power_state;

    /* CPU has virtualization extension */
    bool has_el2;
    /* CPU has security extension */
    bool has_el3;
    /* CPU has PMU (Performance Monitor Unit) */
    bool has_pmu;
    /* CPU has VFP */
    bool has_vfp;
    /* CPU has 32 VFP registers */
    bool has_vfp_d32;
    /* CPU has Neon */
    bool has_neon;
    /* CPU has M-profile DSP extension */
    bool has_dsp;

    /* CPU has memory protection unit */
    bool has_mpu;
    /* PMSAv7 MPU number of supported regions */
    uint32_t pmsav7_dregion;
    /* PMSAv8 MPU number of supported hyp regions */
    uint32_t pmsav8r_hdregion;
    /* v8M SAU number of supported regions */
    uint32_t sau_sregion;

    /* PSCI conduit used to invoke PSCI methods
     * 0 - disabled, 1 - smc, 2 - hvc
     */
    uint32_t psci_conduit;

    /* For v8M, initial value of the Secure VTOR */
    uint32_t init_svtor;
    /* For v8M, initial value of the Non-secure VTOR */
    uint32_t init_nsvtor;

    /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
     * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
     */
    uint32_t kvm_target;

#ifdef CONFIG_KVM
    /* KVM init features for this CPU */
    uint32_t kvm_init_features[7];

    /* KVM CPU state */

    /* KVM virtual time adjustment */
    bool kvm_adjvtime;
    bool kvm_vtime_dirty;
    uint64_t kvm_vtime;

    /* KVM steal time */
    OnOffAuto kvm_steal_time;
#endif /* CONFIG_KVM */

    /* Uniprocessor system with MP extensions */
    bool mp_is_up;

    /* True if we tried kvm_arm_host_cpu_features() during CPU instance_init
     * and the probe failed (so we need to report the error in realize)
     */
    bool host_cpu_probe_failed;

    /* Specify the number of cores in this CPU cluster. Used for the L2CTLR
     * register.
     */
    int32_t core_count;

    /* The instance init functions for implementation-specific subclasses
     * set these fields to specify the implementation-dependent values of
     * various constant registers and reset values of non-constant
     * registers.
     * Some of these might become QOM properties eventually.
     * Field names match the official register names as defined in the
     * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
     * is used for reset values of non-constant registers; no reset_
     * prefix means a constant register.
     * Some of these registers are split out into a substructure that
     * is shared with the translators to control the ISA.
     *
     * Note that if you add an ID register to the ARMISARegisters struct
     * you need to also update the 32-bit and 64-bit versions of the
     * kvm_arm_get_host_cpu_features() function to correctly populate the
     * field by reading the value from the KVM vCPU.
     */
    struct ARMISARegisters {
        uint32_t id_isar0;
        uint32_t id_isar1;
        uint32_t id_isar2;
        uint32_t id_isar3;
        uint32_t id_isar4;
        uint32_t id_isar5;
        uint32_t id_isar6;
        uint32_t id_mmfr0;
        uint32_t id_mmfr1;
        uint32_t id_mmfr2;
        uint32_t id_mmfr3;
        uint32_t id_mmfr4;
        uint32_t id_mmfr5;
        uint32_t id_pfr0;
        uint32_t id_pfr1;
        uint32_t id_pfr2;
        uint32_t mvfr0;
        uint32_t mvfr1;
        uint32_t mvfr2;
        uint32_t id_dfr0;
        uint32_t id_dfr1;
        uint32_t dbgdidr;
        uint32_t dbgdevid;
        uint32_t dbgdevid1;
        uint64_t id_aa64isar0;
        uint64_t id_aa64isar1;
        uint64_t id_aa64isar2;
        uint64_t id_aa64pfr0;
        uint64_t id_aa64pfr1;
        uint64_t id_aa64mmfr0;
        uint64_t id_aa64mmfr1;
        uint64_t id_aa64mmfr2;
        uint64_t id_aa64dfr0;
        uint64_t id_aa64dfr1;
        uint64_t id_aa64zfr0;
        uint64_t id_aa64smfr0;
        uint64_t reset_pmcr_el0;
    } isar;
    uint64_t midr;
    uint32_t revidr;
    uint32_t reset_fpsid;
    uint64_t ctr;
    uint32_t reset_sctlr;
    uint64_t pmceid0;
    uint64_t pmceid1;
    uint32_t id_afr0;
    uint64_t id_aa64afr0;
    uint64_t id_aa64afr1;
    uint64_t clidr;
    uint64_t mp_affinity; /* MP ID without feature bits */
    /* The elements of this array are the CCSIDR values for each cache,
     * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
     */
    uint64_t ccsidr[16];
    uint64_t reset_cbar;
    uint32_t reset_auxcr;
    bool reset_hivecs;
    uint8_t reset_l0gptsz;
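    /*
     * The ID registers in the isar struct above are normally consulted
     * via the FIELD_EX32/FIELD_EX64 helpers from hw/registerfields.h
     * rather than read raw; e.g., assuming the usual
     * FIELD(ID_AA64ISAR0, AES, 4, 4) definition, a feature test looks
     * roughly like:
     *   FIELD_EX64(cpu->isar.id_aa64isar0, ID_AA64ISAR0, AES) != 0
     * so a CPU model advertises features purely through its ID register
     * values, just as hardware does.
     */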
    /*
     * Intermediate values used during property parsing.
     * Once finalized, the values should be read from ID_AA64*.
     */
    bool prop_pauth;
    bool prop_pauth_impdef;
    bool prop_pauth_qarma3;
    bool prop_lpa2;

    /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
    uint8_t dcz_blocksize;
    /* GM blocksize, in log_2(words), ie low 4 bits of GMID_EL0 */
    uint8_t gm_blocksize;
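    /*
     * For instance, dcz_blocksize == 4 means log2(16) words, i.e.
     * 16 * 4 == 64 bytes zeroed per DC ZVA -- the value a typical
     * 64-byte-cache-line CPU model reports in DCZID_EL0.BS.
     */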
    uint64_t rvbar_prop; /* Property/input signals.  */

    /* Configurable aspects of GIC cpu interface (which is part of the CPU) */
    int gic_num_lrs; /* number of list registers */
    int gic_vpribits; /* number of virtual priority bits */
    int gic_vprebits; /* number of virtual preemption bits */
    int gic_pribits; /* number of physical priority bits */

    /* Whether the cfgend input is high (i.e. this CPU should reset into
     * big-endian mode).  This setting isn't used directly: instead it modifies
     * the reset_sctlr value to have SCTLR_B or SCTLR_EE set, depending on the
     * architecture version.
     */
    bool cfgend;

    QLIST_HEAD(, ARMELChangeHook) pre_el_change_hooks;
    QLIST_HEAD(, ARMELChangeHook) el_change_hooks;

    int32_t node_id; /* NUMA node this CPU belongs to */

    /* Used to synchronize KVM and QEMU in-kernel device levels */
    uint8_t device_irq_level;

    /* Used to set the maximum vector length the cpu will support.  */
    uint32_t sve_max_vq;

#ifdef CONFIG_USER_ONLY
    /* Used to set the default vector length at process start. */
    uint32_t sve_default_vq;
    uint32_t sme_default_vq;
#endif

    ARMVQMap sve_vq;
    ARMVQMap sme_vq;

    /* Generic timer counter frequency, in Hz */
    uint64_t gt_cntfrq_hz;
};

typedef struct ARMCPUInfo {
    const char *name;
    void (*initfn)(Object *obj);
    void (*class_init)(ObjectClass *oc, void *data);
} ARMCPUInfo;

/**
 * ARMCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 *
 * An ARM CPU model.
 */
struct ARMCPUClass {
    CPUClass parent_class;

    const ARMCPUInfo *info;
    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
};

struct AArch64CPUClass {
    ARMCPUClass parent_class;
};

/* Callback functions for the generic timer's timers.  */
void arm_gt_ptimer_cb(void *opaque);
void arm_gt_vtimer_cb(void *opaque);
void arm_gt_htimer_cb(void *opaque);
void arm_gt_stimer_cb(void *opaque);
void arm_gt_hvtimer_cb(void *opaque);

unsigned int gt_cntfrq_period_ns(ARMCPU *cpu);
void gt_rme_post_el_change(ARMCPU *cpu, void *opaque);

void arm_cpu_post_init(Object *obj);

#define ARM_AFF0_SHIFT 0
#define ARM_AFF0_MASK  (0xFFULL << ARM_AFF0_SHIFT)
#define ARM_AFF1_SHIFT 8
#define ARM_AFF1_MASK  (0xFFULL << ARM_AFF1_SHIFT)
#define ARM_AFF2_SHIFT 16
#define ARM_AFF2_MASK  (0xFFULL << ARM_AFF2_SHIFT)
#define ARM_AFF3_SHIFT 32
#define ARM_AFF3_MASK  (0xFFULL << ARM_AFF3_SHIFT)
#define ARM_DEFAULT_CPUS_PER_CLUSTER 8

#define ARM32_AFFINITY_MASK (ARM_AFF0_MASK | ARM_AFF1_MASK | ARM_AFF2_MASK)
#define ARM64_AFFINITY_MASK \
    (ARM_AFF0_MASK | ARM_AFF1_MASK | ARM_AFF2_MASK | ARM_AFF3_MASK)
#define ARM64_AFFINITY_INVALID (~ARM64_AFFINITY_MASK)

uint64_t arm_build_mp_affinity(int idx, uint8_t clustersz);

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_arm_cpu;

void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                         MemTxAttrs *attrs);
#endif /* !CONFIG_USER_ONLY */

int arm_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, DumpState *s);
int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, DumpState *s);

/**
 * arm_emulate_firmware_reset: Emulate firmware CPU reset handling
 * @cpustate: CPU (which must have been freshly reset)
 * @target_el: exception level to put the CPU into
 *
 * When QEMU is directly running a guest kernel at a lower level than
 * EL3 it implicitly emulates some aspects of the guest firmware.
 * This includes that on reset we need to configure the parts of the
 * CPU corresponding to EL3 so that the real guest code can run at its
 * lower exception level. This function does that post-reset CPU setup,
 * for when we do direct boot of a guest kernel, and for when we
 * emulate PSCI and similar firmware interfaces starting a CPU at a
 * lower exception level.
 *
 * @target_el must be an EL implemented by the CPU between 1 and 3.
 * We do not support dropping into a Secure EL other than 3.
 *
 * It is the responsibility of the caller to call arm_rebuild_hflags().
 */
void arm_emulate_firmware_reset(CPUState *cpustate, int target_el);

#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64);
void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask);

/*
 * SVE registers are encoded in KVM's memory in an endianness-invariant format.
 * The byte at offset i from the start of the in-memory representation contains
 * the bits [(7 + 8 * i) : (8 * i)] of the register value. As this means the
 * lowest offsets are stored in the lowest memory addresses, then that nearly
 * matches QEMU's representation, which is to use an array of host-endian
 * uint64_t's, where the lower offsets are at the lower indices. To complete
 * the translation we just need to byte swap the uint64_t's on big-endian hosts.
 */
static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
{
#if HOST_BIG_ENDIAN
    int i;

    for (i = 0; i < nr; ++i) {
        dst[i] = bswap64(src[i]);
    }

    return dst;
#else
    return src;
#endif
}

#else
static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
static inline void aarch64_sve_change_el(CPUARMState *env, int o,
                                         int n, bool a)
{ }
#endif

void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);

int fp_exception_el(CPUARMState *env, int cur_el);
int sve_exception_el(CPUARMState *env, int cur_el);
int sme_exception_el(CPUARMState *env, int cur_el);

/**
 * sve_vqm1_for_el_sm:
 * @env: CPUARMState
 * @el: exception level
 * @sm: streaming mode
 *
 * Compute the current vector length for @el & @sm, in units of
 * Quadwords Minus 1 -- the same scale used for ZCR_ELx.LEN.
 * If @sm, compute for SVL, otherwise NVL.
 */
uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm);

/* Likewise, but using @sm = PSTATE.SM. */
uint32_t sve_vqm1_for_el(CPUARMState *env, int el);

static inline bool is_a64(CPUARMState *env)
{
    return env->aarch64;
}

/**
 * pmu_op_start/finish
 * @env: CPUARMState
 *
 * Convert all PMU counters between their delta form (the typical mode when
 * they are enabled) and the guest-visible values. These two calls must
 * surround any action which might affect the counters.
 */
void pmu_op_start(CPUARMState *env);
void pmu_op_finish(CPUARMState *env);

/*
 * Called when a PMU counter is due to overflow
 */
void arm_pmu_timer_cb(void *opaque);

/**
 * Functions to register as EL change hooks for PMU mode filtering
 */
void pmu_pre_el_change(ARMCPU *cpu, void *ignored);
void pmu_post_el_change(ARMCPU *cpu, void *ignored);

/*
 * pmu_init
 * @cpu: ARMCPU
 *
 * Initialize the CPU's PMCEID[01]_EL0 registers and associated internal state
 * for the current configuration
 */
void pmu_init(ARMCPU *cpu);

/* SCTLR bit meanings. Several bits have been reused in newer
 * versions of the architecture; in that case we define constants
 * for both old and new bit meanings. Code which tests against those
 * bits should probably check or otherwise arrange that the CPU
 * is the architectural version it expects.
 */
1293 */ 1294 #define SCTLR_M (1U << 0) 1295 #define SCTLR_A (1U << 1) 1296 #define SCTLR_C (1U << 2) 1297 #define SCTLR_W (1U << 3) /* up to v6; RAO in v7 */ 1298 #define SCTLR_nTLSMD_32 (1U << 3) /* v8.2-LSMAOC, AArch32 only */ 1299 #define SCTLR_SA (1U << 3) /* AArch64 only */ 1300 #define SCTLR_P (1U << 4) /* up to v5; RAO in v6 and v7 */ 1301 #define SCTLR_LSMAOE_32 (1U << 4) /* v8.2-LSMAOC, AArch32 only */ 1302 #define SCTLR_SA0 (1U << 4) /* v8 onward, AArch64 only */ 1303 #define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */ 1304 #define SCTLR_CP15BEN (1U << 5) /* v7 onward */ 1305 #define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */ 1306 #define SCTLR_nAA (1U << 6) /* when FEAT_LSE2 is implemented */ 1307 #define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */ 1308 #define SCTLR_ITD (1U << 7) /* v8 onward */ 1309 #define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */ 1310 #define SCTLR_SED (1U << 8) /* v8 onward */ 1311 #define SCTLR_R (1U << 9) /* up to v6; RAZ in v7 */ 1312 #define SCTLR_UMA (1U << 9) /* v8 onward, AArch64 only */ 1313 #define SCTLR_F (1U << 10) /* up to v6 */ 1314 #define SCTLR_SW (1U << 10) /* v7 */ 1315 #define SCTLR_EnRCTX (1U << 10) /* in v8.0-PredInv */ 1316 #define SCTLR_Z (1U << 11) /* in v7, RES1 in v8 */ 1317 #define SCTLR_EOS (1U << 11) /* v8.5-ExS */ 1318 #define SCTLR_I (1U << 12) 1319 #define SCTLR_V (1U << 13) /* AArch32 only */ 1320 #define SCTLR_EnDB (1U << 13) /* v8.3, AArch64 only */ 1321 #define SCTLR_RR (1U << 14) /* up to v7 */ 1322 #define SCTLR_DZE (1U << 14) /* v8 onward, AArch64 only */ 1323 #define SCTLR_L4 (1U << 15) /* up to v6; RAZ in v7 */ 1324 #define SCTLR_UCT (1U << 15) /* v8 onward, AArch64 only */ 1325 #define SCTLR_DT (1U << 16) /* up to ??, RAO in v6 and v7 */ 1326 #define SCTLR_nTWI (1U << 16) /* v8 onward */ 1327 #define SCTLR_HA (1U << 17) /* up to v7, RES0 in v8 */ 1328 #define SCTLR_BR (1U << 17) /* PMSA only */ 1329 #define SCTLR_IT (1U << 18) /* up to ??, RAO in v6 and v7 */ 1330 #define SCTLR_nTWE (1U << 18) /* v8 onward */ 1331 #define SCTLR_WXN (1U << 19) 1332 #define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */ 1333 #define SCTLR_UWXN (1U << 20) /* v7 onward, AArch32 only */ 1334 #define SCTLR_TSCXT (1U << 20) /* FEAT_CSV2_1p2, AArch64 only */ 1335 #define SCTLR_FI (1U << 21) /* up to v7, v8 RES0 */ 1336 #define SCTLR_IESB (1U << 21) /* v8.2-IESB, AArch64 only */ 1337 #define SCTLR_U (1U << 22) /* up to v6, RAO in v7 */ 1338 #define SCTLR_EIS (1U << 22) /* v8.5-ExS */ 1339 #define SCTLR_XP (1U << 23) /* up to v6; v7 onward RAO */ 1340 #define SCTLR_SPAN (1U << 23) /* v8.1-PAN */ 1341 #define SCTLR_VE (1U << 24) /* up to v7 */ 1342 #define SCTLR_E0E (1U << 24) /* v8 onward, AArch64 only */ 1343 #define SCTLR_EE (1U << 25) 1344 #define SCTLR_L2 (1U << 26) /* up to v6, RAZ in v7 */ 1345 #define SCTLR_UCI (1U << 26) /* v8 onward, AArch64 only */ 1346 #define SCTLR_NMFI (1U << 27) /* up to v7, RAZ in v7VE and v8 */ 1347 #define SCTLR_EnDA (1U << 27) /* v8.3, AArch64 only */ 1348 #define SCTLR_TRE (1U << 28) /* AArch32 only */ 1349 #define SCTLR_nTLSMD_64 (1U << 28) /* v8.2-LSMAOC, AArch64 only */ 1350 #define SCTLR_AFE (1U << 29) /* AArch32 only */ 1351 #define SCTLR_LSMAOE_64 (1U << 29) /* v8.2-LSMAOC, AArch64 only */ 1352 #define SCTLR_TE (1U << 30) /* AArch32 only */ 1353 #define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */ 1354 #define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */ 1355 #define SCTLR_DSSBS_32 (1U << 31) /* v8.5, AArch32 only */ 1356 #define SCTLR_MSCEN (1ULL << 33) /* FEAT_MOPS */ 1357 
#define SCTLR_BT0     (1ULL << 35) /* v8.5-BTI */
#define SCTLR_BT1     (1ULL << 36) /* v8.5-BTI */
#define SCTLR_ITFSB   (1ULL << 37) /* v8.5-MemTag */
#define SCTLR_TCF0    (3ULL << 38) /* v8.5-MemTag */
#define SCTLR_TCF     (3ULL << 40) /* v8.5-MemTag */
#define SCTLR_ATA0    (1ULL << 42) /* v8.5-MemTag */
#define SCTLR_ATA     (1ULL << 43) /* v8.5-MemTag */
#define SCTLR_DSSBS_64 (1ULL << 44) /* v8.5, AArch64 only */
#define SCTLR_TWEDEn  (1ULL << 45) /* FEAT_TWED */
#define SCTLR_TWEDEL  MAKE_64BIT_MASK(46, 4) /* FEAT_TWED */
#define SCTLR_TMT0    (1ULL << 50) /* FEAT_TME */
#define SCTLR_TMT     (1ULL << 51) /* FEAT_TME */
#define SCTLR_TME0    (1ULL << 52) /* FEAT_TME */
#define SCTLR_TME     (1ULL << 53) /* FEAT_TME */
#define SCTLR_EnASR   (1ULL << 54) /* FEAT_LS64_V */
#define SCTLR_EnAS0   (1ULL << 55) /* FEAT_LS64_ACCDATA */
#define SCTLR_EnALS   (1ULL << 56) /* FEAT_LS64 */
#define SCTLR_EPAN    (1ULL << 57) /* FEAT_PAN3 */
#define SCTLR_EnTP2   (1ULL << 60) /* FEAT_SME */
#define SCTLR_NMI     (1ULL << 61) /* FEAT_NMI */
#define SCTLR_SPINTMASK (1ULL << 62) /* FEAT_NMI */
#define SCTLR_TIDCP   (1ULL << 63) /* FEAT_TIDCP1 */

#define CPSR_M (0x1fU)
#define CPSR_T (1U << 5)
#define CPSR_F (1U << 6)
#define CPSR_I (1U << 7)
#define CPSR_A (1U << 8)
#define CPSR_E (1U << 9)
#define CPSR_IT_2_7 (0xfc00U)
#define CPSR_GE (0xfU << 16)
#define CPSR_IL (1U << 20)
#define CPSR_DIT (1U << 21)
#define CPSR_PAN (1U << 22)
#define CPSR_SSBS (1U << 23)
#define CPSR_J (1U << 24)
#define CPSR_IT_0_1 (3U << 25)
#define CPSR_Q (1U << 27)
#define CPSR_V (1U << 28)
#define CPSR_C (1U << 29)
#define CPSR_Z (1U << 30)
#define CPSR_N (1U << 31)
#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)
#define ISR_FS (1U << 9)
#define ISR_IS (1U << 10)

#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
    | CPSR_NZCV)
/* Bits writable in user mode.  */
#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE | CPSR_E)
/* Execution state bits.  MRS read as zero, MSR writes ignored.  */
#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)

/* Bit definitions for M profile XPSR. Most are the same as CPSR. */
#define XPSR_EXCP 0x1ffU
#define XPSR_SPREALIGN (1U << 9) /* Only set in exception stack frames */
#define XPSR_IT_2_7 CPSR_IT_2_7
#define XPSR_GE CPSR_GE
#define XPSR_SFPA (1U << 20) /* Only set in exception stack frames */
#define XPSR_T (1U << 24) /* Not the same as CPSR_T ! */
#define XPSR_IT_0_1 CPSR_IT_0_1
#define XPSR_Q CPSR_Q
#define XPSR_V CPSR_V
#define XPSR_C CPSR_C
#define XPSR_Z CPSR_Z
#define XPSR_N CPSR_N
#define XPSR_NZCV CPSR_NZCV
#define XPSR_IT CPSR_IT

/* Bit definitions for ARMv8 SPSR (PSTATE) format.
 * Only these are valid when in AArch64 mode; in
 * AArch32 mode SPSRs are basically CPSR-format.
 */
#define PSTATE_SP (1U)
#define PSTATE_M (0xFU)
#define PSTATE_nRW (1U << 4)
#define PSTATE_F (1U << 6)
#define PSTATE_I (1U << 7)
#define PSTATE_A (1U << 8)
#define PSTATE_D (1U << 9)
#define PSTATE_BTYPE (3U << 10)
#define PSTATE_SSBS (1U << 12)
#define PSTATE_ALLINT (1U << 13)
#define PSTATE_IL (1U << 20)
#define PSTATE_SS (1U << 21)
#define PSTATE_PAN (1U << 22)
#define PSTATE_UAO (1U << 23)
#define PSTATE_DIT (1U << 24)
#define PSTATE_TCO (1U << 25)
#define PSTATE_V (1U << 28)
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
#define PSTATE_N (1U << 31)
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF | PSTATE_BTYPE)
/* Mode values for AArch64 */
#define PSTATE_MODE_EL3h 13
#define PSTATE_MODE_EL3t 12
#define PSTATE_MODE_EL2h 9
#define PSTATE_MODE_EL2t 8
#define PSTATE_MODE_EL1h 5
#define PSTATE_MODE_EL1t 4
#define PSTATE_MODE_EL0t 0

/* PSTATE bits that are accessed via SVCR and not stored in SPSR_ELx. */
FIELD(SVCR, SM, 0, 1)
FIELD(SVCR, ZA, 1, 1)

/* Fields for SMCR_ELx. */
FIELD(SMCR, LEN, 0, 4)
FIELD(SMCR, FA64, 31, 1)

/* Write a new value to v7m.exception, thus transitioning into or out
 * of Handler mode; this may result in a change of active stack pointer.
 */
void write_v7m_exception(CPUARMState *env, uint32_t new_exc);

/* Map EL and handler into a PSTATE_MODE.  */
static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
{
    return (el << 2) | handler;
}

/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
 * interprocessing, so we don't attempt to sync with the cpsr state used by
 * the 32 bit decoder.
 */
static inline uint32_t pstate_read(CPUARMState *env)
{
    int ZF;

    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
        | env->pstate | env->daif | (env->btype << 10);
}

static inline void pstate_write(CPUARMState *env, uint32_t val)
{
    env->ZF = (~val) & PSTATE_Z;
    env->NF = val;
    env->CF = (val >> 29) & 1;
    env->VF = (val << 3) & 0x80000000;
    env->daif = val & PSTATE_DAIF;
    env->btype = (val >> 10) & 3;
    env->pstate = val & ~CACHED_PSTATE_BITS;
}
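/*
 * For example, after pstate_write(env, PSTATE_Z | PSTATE_V):
 *   env->ZF == 0            (the ZF cache uses "zero means Z is set")
 *   env->VF has bit 31 set  (V is shifted up from bit 28 to bit 31)
 * and pstate_read() reassembles exactly PSTATE_Z | PSTATE_V from the
 * cached fields, so the split representation round-trips. Likewise,
 * aarch64_pstate_mode(1, true) == ((1 << 2) | 1) == PSTATE_MODE_EL1h.
 */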
/* Return the current CPSR value.  */
uint32_t cpsr_read(CPUARMState *env);

typedef enum CPSRWriteType {
    CPSRWriteByInstr = 0,         /* from guest MSR or CPS */
    CPSRWriteExceptionReturn = 1, /* from guest exception return insn */
    CPSRWriteRaw = 2,
        /* trust values, no reg bank switch, no hflags rebuild */
    CPSRWriteByGDBStub = 3,       /* from the GDB stub */
} CPSRWriteType;

/*
 * Set the CPSR.  Note that some bits of mask must be all-set or all-clear.
 * This will do an arm_rebuild_hflags() if any of the bits in @mask
 * correspond to TB flags bits cached in the hflags, unless @write_type
 * is CPSRWriteRaw.
 */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type);

/* Return the current xPSR value.  */
static inline uint32_t xpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16)
        | env->v7m.exception;
}

/* Set the xPSR.  Note that some bits of mask must be all-set or all-clear.  */
static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & XPSR_NZCV) {
        env->ZF = (~val) & XPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & XPSR_Q) {
        env->QF = ((val & XPSR_Q) != 0);
    }
    if (mask & XPSR_GE) {
        env->GE = (val & XPSR_GE) >> 16;
    }
#ifndef CONFIG_USER_ONLY
    if (mask & XPSR_T) {
        env->thumb = ((val & XPSR_T) != 0);
    }
    if (mask & XPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & XPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & XPSR_EXCP) {
        /* Note that this only happens on exception exit */
        write_v7m_exception(env, val & XPSR_EXCP);
    }
#endif
}

#define HCR_VM        (1ULL << 0)
#define HCR_SWIO      (1ULL << 1)
#define HCR_PTW       (1ULL << 2)
#define HCR_FMO       (1ULL << 3)
#define HCR_IMO       (1ULL << 4)
#define HCR_AMO       (1ULL << 5)
#define HCR_VF        (1ULL << 6)
#define HCR_VI        (1ULL << 7)
#define HCR_VSE       (1ULL << 8)
#define HCR_FB        (1ULL << 9)
#define HCR_BSU_MASK  (3ULL << 10)
#define HCR_DC        (1ULL << 12)
#define HCR_TWI       (1ULL << 13)
#define HCR_TWE       (1ULL << 14)
#define HCR_TID0      (1ULL << 15)
#define HCR_TID1      (1ULL << 16)
#define HCR_TID2      (1ULL << 17)
#define HCR_TID3      (1ULL << 18)
#define HCR_TSC       (1ULL << 19)
#define HCR_TIDCP     (1ULL << 20)
#define HCR_TACR      (1ULL << 21)
#define HCR_TSW       (1ULL << 22)
#define HCR_TPCP      (1ULL << 23)
#define HCR_TPU       (1ULL << 24)
#define HCR_TTLB      (1ULL << 25)
#define HCR_TVM       (1ULL << 26)
#define HCR_TGE       (1ULL << 27)
#define HCR_TDZ       (1ULL << 28)
#define HCR_HCD       (1ULL << 29)
#define HCR_TRVM      (1ULL << 30)
#define HCR_RW        (1ULL << 31)
#define HCR_CD        (1ULL << 32)
#define HCR_ID        (1ULL << 33)
#define HCR_E2H       (1ULL << 34)
#define HCR_TLOR      (1ULL << 35)
#define HCR_TERR      (1ULL << 36)
#define HCR_TEA       (1ULL << 37)
#define HCR_MIOCNCE   (1ULL << 38)
#define HCR_TME       (1ULL << 39)
#define HCR_APK       (1ULL << 40)
#define HCR_API       (1ULL << 41)
#define HCR_NV        (1ULL << 42)
#define HCR_NV1       (1ULL << 43)
#define HCR_AT        (1ULL << 44)
#define HCR_NV2       (1ULL << 45)
#define HCR_FWB       (1ULL << 46)
#define HCR_FIEN      (1ULL << 47)
#define HCR_GPF       (1ULL << 48)
#define HCR_TID4      (1ULL << 49)
#define HCR_TICAB     (1ULL << 50)
#define HCR_AMVOFFEN  (1ULL << 51)
#define HCR_TOCU      (1ULL << 52)
#define HCR_ENSCXT    (1ULL << 53)
#define HCR_TTLBIS    (1ULL << 54)
#define HCR_TTLBOS    (1ULL << 55)
#define HCR_ATA       (1ULL << 56)
#define HCR_DCT       (1ULL << 57)
#define HCR_TID5      (1ULL << 58)
#define HCR_TWEDEN    (1ULL << 59)
#define HCR_TWEDEL    MAKE_64BIT_MASK(60, 4)

#define SCR_NS                (1ULL << 0)
#define SCR_IRQ               (1ULL << 1)
#define SCR_FIQ               (1ULL << 2)

#define SCR_NS        (1ULL << 0)
#define SCR_IRQ       (1ULL << 1)
#define SCR_FIQ       (1ULL << 2)
#define SCR_EA        (1ULL << 3)
#define SCR_FW        (1ULL << 4)
#define SCR_AW        (1ULL << 5)
#define SCR_NET       (1ULL << 6)
#define SCR_SMD       (1ULL << 7)
#define SCR_HCE       (1ULL << 8)
#define SCR_SIF       (1ULL << 9)
#define SCR_RW        (1ULL << 10)
#define SCR_ST        (1ULL << 11)
#define SCR_TWI       (1ULL << 12)
#define SCR_TWE       (1ULL << 13)
#define SCR_TLOR      (1ULL << 14)
#define SCR_TERR      (1ULL << 15)
#define SCR_APK       (1ULL << 16)
#define SCR_API       (1ULL << 17)
#define SCR_EEL2      (1ULL << 18)
#define SCR_EASE      (1ULL << 19)
#define SCR_NMEA      (1ULL << 20)
#define SCR_FIEN      (1ULL << 21)
#define SCR_ENSCXT    (1ULL << 25)
#define SCR_ATA       (1ULL << 26)
#define SCR_FGTEN     (1ULL << 27)
#define SCR_ECVEN     (1ULL << 28)
#define SCR_TWEDEN    (1ULL << 29)
#define SCR_TWEDEL    MAKE_64BIT_MASK(30, 4)
#define SCR_TME       (1ULL << 34)
#define SCR_AMVOFFEN  (1ULL << 35)
#define SCR_ENAS0     (1ULL << 36)
#define SCR_ADEN      (1ULL << 37)
#define SCR_HXEN      (1ULL << 38)
#define SCR_TRNDR     (1ULL << 40)
#define SCR_ENTP2     (1ULL << 41)
#define SCR_GPF       (1ULL << 48)
#define SCR_NSE       (1ULL << 62)

/* Return the current FPSCR value. */
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);

/* FPCR, Floating Point Control Register
 * FPSR, Floating Point Status Register
 *
 * For A64 the FPSCR is split into two logically distinct registers,
 * FPCR and FPSR. However since they still use non-overlapping bits
 * we store the underlying state in fpscr and just mask on read/write.
 */
#define FPSR_MASK 0xf800009f
#define FPCR_MASK 0x07ff9f00

#define FPCR_IOE    (1 << 8)    /* Invalid Operation exception trap enable */
#define FPCR_DZE    (1 << 9)    /* Divide by Zero exception trap enable */
#define FPCR_OFE    (1 << 10)   /* Overflow exception trap enable */
#define FPCR_UFE    (1 << 11)   /* Underflow exception trap enable */
#define FPCR_IXE    (1 << 12)   /* Inexact exception trap enable */
#define FPCR_IDE    (1 << 15)   /* Input Denormal exception trap enable */
#define FPCR_FZ16   (1 << 19)   /* ARMv8.2+, FP16 flush-to-zero */
#define FPCR_RMODE_MASK (3 << 22) /* Rounding mode */
#define FPCR_FZ     (1 << 24)   /* Flush-to-zero enable bit */
#define FPCR_DN     (1 << 25)   /* Default NaN enable bit */
#define FPCR_AHP    (1 << 26)   /* Alternative half-precision */
#define FPCR_QC     (1 << 27)   /* Cumulative saturation bit */
#define FPCR_V      (1 << 28)   /* FP overflow flag */
#define FPCR_C      (1 << 29)   /* FP carry flag */
#define FPCR_Z      (1 << 30)   /* FP zero flag */
#define FPCR_N      (1 << 31)   /* FP negative flag */

#define FPCR_LTPSIZE_SHIFT 16   /* LTPSIZE, M-profile only */
#define FPCR_LTPSIZE_MASK (7 << FPCR_LTPSIZE_SHIFT)
#define FPCR_LTPSIZE_LENGTH 3

#define FPCR_NZCV_MASK (FPCR_N | FPCR_Z | FPCR_C | FPCR_V)
#define FPCR_NZCVQC_MASK (FPCR_NZCV_MASK | FPCR_QC)

static inline uint32_t vfp_get_fpsr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPSR_MASK;
}

static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

static inline uint32_t vfp_get_fpcr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPCR_MASK;
}

static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}
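
/*
 * Usage sketch (illustrative only, not part of the QEMU API): because
 * FPSR_MASK and FPCR_MASK do not overlap, writing one logical half of
 * the combined fpscr state leaves the other half intact:
 */
static inline void vfp_fpsr_write_sketch(CPUARMState *env)
{
    uint32_t fpcr = vfp_get_fpcr(env);
    vfp_set_fpsr(env, FPCR_QC);         /* QC lives in the FPSR half */
    assert(vfp_get_fpcr(env) == fpcr);  /* control bits are untouched */
}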

enum arm_cpu_mode {
    ARM_CPU_MODE_USR = 0x10,
    ARM_CPU_MODE_FIQ = 0x11,
    ARM_CPU_MODE_IRQ = 0x12,
    ARM_CPU_MODE_SVC = 0x13,
    ARM_CPU_MODE_MON = 0x16,
    ARM_CPU_MODE_ABT = 0x17,
    ARM_CPU_MODE_HYP = 0x1a,
    ARM_CPU_MODE_UND = 0x1b,
    ARM_CPU_MODE_SYS = 0x1f
};

/* VFP system registers. */
#define ARM_VFP_FPSID   0
#define ARM_VFP_FPSCR   1
#define ARM_VFP_MVFR2   5
#define ARM_VFP_MVFR1   6
#define ARM_VFP_MVFR0   7
#define ARM_VFP_FPEXC   8
#define ARM_VFP_FPINST  9
#define ARM_VFP_FPINST2 10
/* These ones are M-profile only */
#define ARM_VFP_FPSCR_NZCVQC 2
#define ARM_VFP_VPR 12
#define ARM_VFP_P0 13
#define ARM_VFP_FPCXT_NS 14
#define ARM_VFP_FPCXT_S 15

/* QEMU-internal value meaning "FPSCR, but we care only about NZCV" */
#define QEMU_VFP_FPSCR_NZCV 0xffff

/* iwMMXt coprocessor control registers. */
#define ARM_IWMMXT_wCID  0
#define ARM_IWMMXT_wCon  1
#define ARM_IWMMXT_wCSSF 2
#define ARM_IWMMXT_wCASF 3
#define ARM_IWMMXT_wCGR0 8
#define ARM_IWMMXT_wCGR1 9
#define ARM_IWMMXT_wCGR2 10
#define ARM_IWMMXT_wCGR3 11

/* V7M CCR bits */
FIELD(V7M_CCR, NONBASETHRDENA, 0, 1)
FIELD(V7M_CCR, USERSETMPEND, 1, 1)
FIELD(V7M_CCR, UNALIGN_TRP, 3, 1)
FIELD(V7M_CCR, DIV_0_TRP, 4, 1)
FIELD(V7M_CCR, BFHFNMIGN, 8, 1)
FIELD(V7M_CCR, STKALIGN, 9, 1)
FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1)
FIELD(V7M_CCR, DC, 16, 1)
FIELD(V7M_CCR, IC, 17, 1)
FIELD(V7M_CCR, BP, 18, 1)
FIELD(V7M_CCR, LOB, 19, 1)
FIELD(V7M_CCR, TRD, 20, 1)

/* V7M SCR bits */
FIELD(V7M_SCR, SLEEPONEXIT, 1, 1)
FIELD(V7M_SCR, SLEEPDEEP, 2, 1)
FIELD(V7M_SCR, SLEEPDEEPS, 3, 1)
FIELD(V7M_SCR, SEVONPEND, 4, 1)

/* V7M AIRCR bits */
FIELD(V7M_AIRCR, VECTRESET, 0, 1)
FIELD(V7M_AIRCR, VECTCLRACTIVE, 1, 1)
FIELD(V7M_AIRCR, SYSRESETREQ, 2, 1)
FIELD(V7M_AIRCR, SYSRESETREQS, 3, 1)
FIELD(V7M_AIRCR, PRIGROUP, 8, 3)
FIELD(V7M_AIRCR, BFHFNMINS, 13, 1)
FIELD(V7M_AIRCR, PRIS, 14, 1)
FIELD(V7M_AIRCR, ENDIANNESS, 15, 1)
FIELD(V7M_AIRCR, VECTKEY, 16, 16)

/* V7M CFSR bits for MMFSR */
FIELD(V7M_CFSR, IACCVIOL, 0, 1)
FIELD(V7M_CFSR, DACCVIOL, 1, 1)
FIELD(V7M_CFSR, MUNSTKERR, 3, 1)
FIELD(V7M_CFSR, MSTKERR, 4, 1)
FIELD(V7M_CFSR, MLSPERR, 5, 1)
FIELD(V7M_CFSR, MMARVALID, 7, 1)

/* V7M CFSR bits for BFSR */
FIELD(V7M_CFSR, IBUSERR, 8 + 0, 1)
FIELD(V7M_CFSR, PRECISERR, 8 + 1, 1)
FIELD(V7M_CFSR, IMPRECISERR, 8 + 2, 1)
FIELD(V7M_CFSR, UNSTKERR, 8 + 3, 1)
FIELD(V7M_CFSR, STKERR, 8 + 4, 1)
FIELD(V7M_CFSR, LSPERR, 8 + 5, 1)
FIELD(V7M_CFSR, BFARVALID, 8 + 7, 1)

/* V7M CFSR bits for UFSR */
FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1)
FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1)
FIELD(V7M_CFSR, INVPC, 16 + 2, 1)
FIELD(V7M_CFSR, NOCP, 16 + 3, 1)
FIELD(V7M_CFSR, STKOF, 16 + 4, 1)
FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1)
FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1)

/* V7M CFSR bit masks covering all of the subregister bits */
FIELD(V7M_CFSR, MMFSR, 0, 8)
FIELD(V7M_CFSR, BFSR, 8, 8)
FIELD(V7M_CFSR, UFSR, 16, 16)
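
/*
 * Usage sketch (illustrative only, not part of the QEMU API): the
 * V7M_CFSR subregister masks above let you pull one fault status byte
 * out of a combined CFSR value with the FIELD_EX32() macro from
 * hw/registerfields.h:
 */
static inline uint32_t v7m_cfsr_bfsr_sketch(uint32_t cfsr)
{
    return FIELD_EX32(cfsr, V7M_CFSR, BFSR);
}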

/* V7M HFSR bits */
FIELD(V7M_HFSR, VECTTBL, 1, 1)
FIELD(V7M_HFSR, FORCED, 30, 1)
FIELD(V7M_HFSR, DEBUGEVT, 31, 1)

/* V7M DFSR bits */
FIELD(V7M_DFSR, HALTED, 0, 1)
FIELD(V7M_DFSR, BKPT, 1, 1)
FIELD(V7M_DFSR, DWTTRAP, 2, 1)
FIELD(V7M_DFSR, VCATCH, 3, 1)
FIELD(V7M_DFSR, EXTERNAL, 4, 1)

/* V7M SFSR bits */
FIELD(V7M_SFSR, INVEP, 0, 1)
FIELD(V7M_SFSR, INVIS, 1, 1)
FIELD(V7M_SFSR, INVER, 2, 1)
FIELD(V7M_SFSR, AUVIOL, 3, 1)
FIELD(V7M_SFSR, INVTRAN, 4, 1)
FIELD(V7M_SFSR, LSPERR, 5, 1)
FIELD(V7M_SFSR, SFARVALID, 6, 1)
FIELD(V7M_SFSR, LSERR, 7, 1)

/* v7M MPU_CTRL bits */
FIELD(V7M_MPU_CTRL, ENABLE, 0, 1)
FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1)
FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1)

/* v7M CLIDR bits */
FIELD(V7M_CLIDR, CTYPE_ALL, 0, 21)
FIELD(V7M_CLIDR, LOUIS, 21, 3)
FIELD(V7M_CLIDR, LOC, 24, 3)
FIELD(V7M_CLIDR, LOUU, 27, 3)
FIELD(V7M_CLIDR, ICB, 30, 2)

FIELD(V7M_CSSELR, IND, 0, 1)
FIELD(V7M_CSSELR, LEVEL, 1, 3)
/* We use the combination of InD and Level to index into cpu->ccsidr[];
 * define a mask for this and check that it doesn't permit running off
 * the end of the array.
 */
FIELD(V7M_CSSELR, INDEX, 0, 4)

/* v7M FPCCR bits */
FIELD(V7M_FPCCR, LSPACT, 0, 1)
FIELD(V7M_FPCCR, USER, 1, 1)
FIELD(V7M_FPCCR, S, 2, 1)
FIELD(V7M_FPCCR, THREAD, 3, 1)
FIELD(V7M_FPCCR, HFRDY, 4, 1)
FIELD(V7M_FPCCR, MMRDY, 5, 1)
FIELD(V7M_FPCCR, BFRDY, 6, 1)
FIELD(V7M_FPCCR, SFRDY, 7, 1)
FIELD(V7M_FPCCR, MONRDY, 8, 1)
FIELD(V7M_FPCCR, SPLIMVIOL, 9, 1)
FIELD(V7M_FPCCR, UFRDY, 10, 1)
FIELD(V7M_FPCCR, RES0, 11, 15)
FIELD(V7M_FPCCR, TS, 26, 1)
FIELD(V7M_FPCCR, CLRONRETS, 27, 1)
FIELD(V7M_FPCCR, CLRONRET, 28, 1)
FIELD(V7M_FPCCR, LSPENS, 29, 1)
FIELD(V7M_FPCCR, LSPEN, 30, 1)
FIELD(V7M_FPCCR, ASPEN, 31, 1)
/* These bits are banked. Others are non-banked and live in the M_REG_S bank */
#define R_V7M_FPCCR_BANKED_MASK                 \
    (R_V7M_FPCCR_LSPACT_MASK |                  \
     R_V7M_FPCCR_USER_MASK |                    \
     R_V7M_FPCCR_THREAD_MASK |                  \
     R_V7M_FPCCR_MMRDY_MASK |                   \
     R_V7M_FPCCR_SPLIMVIOL_MASK |               \
     R_V7M_FPCCR_UFRDY_MASK |                   \
     R_V7M_FPCCR_ASPEN_MASK)

/* v7M VPR bits */
FIELD(V7M_VPR, P0, 0, 16)
FIELD(V7M_VPR, MASK01, 16, 4)
FIELD(V7M_VPR, MASK23, 20, 4)

/*
 * System register ID fields.
 */
FIELD(CLIDR_EL1, CTYPE1, 0, 3)
FIELD(CLIDR_EL1, CTYPE2, 3, 3)
FIELD(CLIDR_EL1, CTYPE3, 6, 3)
FIELD(CLIDR_EL1, CTYPE4, 9, 3)
FIELD(CLIDR_EL1, CTYPE5, 12, 3)
FIELD(CLIDR_EL1, CTYPE6, 15, 3)
FIELD(CLIDR_EL1, CTYPE7, 18, 3)
FIELD(CLIDR_EL1, LOUIS, 21, 3)
FIELD(CLIDR_EL1, LOC, 24, 3)
FIELD(CLIDR_EL1, LOUU, 27, 3)
FIELD(CLIDR_EL1, ICB, 30, 3)

/* When FEAT_CCIDX is implemented */
FIELD(CCSIDR_EL1, CCIDX_LINESIZE, 0, 3)
FIELD(CCSIDR_EL1, CCIDX_ASSOCIATIVITY, 3, 21)
FIELD(CCSIDR_EL1, CCIDX_NUMSETS, 32, 24)

/* When FEAT_CCIDX is not implemented */
FIELD(CCSIDR_EL1, LINESIZE, 0, 3)
FIELD(CCSIDR_EL1, ASSOCIATIVITY, 3, 10)
FIELD(CCSIDR_EL1, NUMSETS, 13, 15)

FIELD(CTR_EL0, IMINLINE, 0, 4)
FIELD(CTR_EL0, L1IP, 14, 2)
FIELD(CTR_EL0, DMINLINE, 16, 4)
FIELD(CTR_EL0, ERG, 20, 4)
FIELD(CTR_EL0, CWG, 24, 4)
FIELD(CTR_EL0, IDC, 28, 1)
FIELD(CTR_EL0, DIC, 29, 1)
FIELD(CTR_EL0, TMINLINE, 32, 6)
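
/*
 * Sketch (illustrative only, not part of the QEMU API): CCSIDR_EL1 has
 * the two alternative layouts above, so decoding e.g. the number of
 * cache sets must know whether FEAT_CCIDX is implemented; the "ccidx"
 * flag here is an assumed input supplied by the caller:
 */
static inline uint64_t ccsidr_numsets_sketch(uint64_t ccsidr, bool ccidx)
{
    return ccidx ? FIELD_EX64(ccsidr, CCSIDR_EL1, CCIDX_NUMSETS)
                 : FIELD_EX64(ccsidr, CCSIDR_EL1, NUMSETS);
}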

FIELD(MIDR_EL1, REVISION, 0, 4)
FIELD(MIDR_EL1, PARTNUM, 4, 12)
FIELD(MIDR_EL1, ARCHITECTURE, 16, 4)
FIELD(MIDR_EL1, VARIANT, 20, 4)
FIELD(MIDR_EL1, IMPLEMENTER, 24, 8)

FIELD(ID_ISAR0, SWAP, 0, 4)
FIELD(ID_ISAR0, BITCOUNT, 4, 4)
FIELD(ID_ISAR0, BITFIELD, 8, 4)
FIELD(ID_ISAR0, CMPBRANCH, 12, 4)
FIELD(ID_ISAR0, COPROC, 16, 4)
FIELD(ID_ISAR0, DEBUG, 20, 4)
FIELD(ID_ISAR0, DIVIDE, 24, 4)

FIELD(ID_ISAR1, ENDIAN, 0, 4)
FIELD(ID_ISAR1, EXCEPT, 4, 4)
FIELD(ID_ISAR1, EXCEPT_AR, 8, 4)
FIELD(ID_ISAR1, EXTEND, 12, 4)
FIELD(ID_ISAR1, IFTHEN, 16, 4)
FIELD(ID_ISAR1, IMMEDIATE, 20, 4)
FIELD(ID_ISAR1, INTERWORK, 24, 4)
FIELD(ID_ISAR1, JAZELLE, 28, 4)

FIELD(ID_ISAR2, LOADSTORE, 0, 4)
FIELD(ID_ISAR2, MEMHINT, 4, 4)
FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4)
FIELD(ID_ISAR2, MULT, 12, 4)
FIELD(ID_ISAR2, MULTS, 16, 4)
FIELD(ID_ISAR2, MULTU, 20, 4)
FIELD(ID_ISAR2, PSR_AR, 24, 4)
FIELD(ID_ISAR2, REVERSAL, 28, 4)

FIELD(ID_ISAR3, SATURATE, 0, 4)
FIELD(ID_ISAR3, SIMD, 4, 4)
FIELD(ID_ISAR3, SVC, 8, 4)
FIELD(ID_ISAR3, SYNCHPRIM, 12, 4)
FIELD(ID_ISAR3, TABBRANCH, 16, 4)
FIELD(ID_ISAR3, T32COPY, 20, 4)
FIELD(ID_ISAR3, TRUENOP, 24, 4)
FIELD(ID_ISAR3, T32EE, 28, 4)

FIELD(ID_ISAR4, UNPRIV, 0, 4)
FIELD(ID_ISAR4, WITHSHIFTS, 4, 4)
FIELD(ID_ISAR4, WRITEBACK, 8, 4)
FIELD(ID_ISAR4, SMC, 12, 4)
FIELD(ID_ISAR4, BARRIER, 16, 4)
FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4)
FIELD(ID_ISAR4, PSR_M, 24, 4)
FIELD(ID_ISAR4, SWP_FRAC, 28, 4)

FIELD(ID_ISAR5, SEVL, 0, 4)
FIELD(ID_ISAR5, AES, 4, 4)
FIELD(ID_ISAR5, SHA1, 8, 4)
FIELD(ID_ISAR5, SHA2, 12, 4)
FIELD(ID_ISAR5, CRC32, 16, 4)
FIELD(ID_ISAR5, RDM, 24, 4)
FIELD(ID_ISAR5, VCMA, 28, 4)

FIELD(ID_ISAR6, JSCVT, 0, 4)
FIELD(ID_ISAR6, DP, 4, 4)
FIELD(ID_ISAR6, FHM, 8, 4)
FIELD(ID_ISAR6, SB, 12, 4)
FIELD(ID_ISAR6, SPECRES, 16, 4)
FIELD(ID_ISAR6, BF16, 20, 4)
FIELD(ID_ISAR6, I8MM, 24, 4)

FIELD(ID_MMFR0, VMSA, 0, 4)
FIELD(ID_MMFR0, PMSA, 4, 4)
FIELD(ID_MMFR0, OUTERSHR, 8, 4)
FIELD(ID_MMFR0, SHARELVL, 12, 4)
FIELD(ID_MMFR0, TCM, 16, 4)
FIELD(ID_MMFR0, AUXREG, 20, 4)
FIELD(ID_MMFR0, FCSE, 24, 4)
FIELD(ID_MMFR0, INNERSHR, 28, 4)

FIELD(ID_MMFR1, L1HVDVA, 0, 4)
FIELD(ID_MMFR1, L1UNIVA, 4, 4)
FIELD(ID_MMFR1, L1HVDSW, 8, 4)
FIELD(ID_MMFR1, L1UNISW, 12, 4)
FIELD(ID_MMFR1, L1HVD, 16, 4)
FIELD(ID_MMFR1, L1UNI, 20, 4)
FIELD(ID_MMFR1, L1TSTCLN, 24, 4)
FIELD(ID_MMFR1, BPRED, 28, 4)

FIELD(ID_MMFR2, L1HVDFG, 0, 4)
FIELD(ID_MMFR2, L1HVDBG, 4, 4)
FIELD(ID_MMFR2, L1HVDRNG, 8, 4)
FIELD(ID_MMFR2, HVDTLB, 12, 4)
FIELD(ID_MMFR2, UNITLB, 16, 4)
FIELD(ID_MMFR2, MEMBARR, 20, 4)
FIELD(ID_MMFR2, WFISTALL, 24, 4)
FIELD(ID_MMFR2, HWACCFLG, 28, 4)

FIELD(ID_MMFR3, CMAINTVA, 0, 4)
FIELD(ID_MMFR3, CMAINTSW, 4, 4)
FIELD(ID_MMFR3, BPMAINT, 8, 4)
FIELD(ID_MMFR3, MAINTBCST, 12, 4)
FIELD(ID_MMFR3, PAN, 16, 4)
FIELD(ID_MMFR3, COHWALK, 20, 4)
FIELD(ID_MMFR3, CMEMSZ, 24, 4)
FIELD(ID_MMFR3, SUPERSEC, 28, 4)

FIELD(ID_MMFR4, SPECSEI, 0, 4)
FIELD(ID_MMFR4, AC2, 4, 4)
FIELD(ID_MMFR4, XNX, 8, 4)
FIELD(ID_MMFR4, CNP, 12, 4)
FIELD(ID_MMFR4, HPDS, 16, 4)
FIELD(ID_MMFR4, LSM, 20, 4)
FIELD(ID_MMFR4, CCIDX, 24, 4)
FIELD(ID_MMFR4, EVT, 28, 4)

FIELD(ID_MMFR5, ETS, 0, 4)
FIELD(ID_MMFR5, NTLBPA, 4, 4)

FIELD(ID_PFR0, STATE0, 0, 4)
FIELD(ID_PFR0, STATE1, 4, 4)
FIELD(ID_PFR0, STATE2, 8, 4)
FIELD(ID_PFR0, STATE3, 12, 4)
FIELD(ID_PFR0, CSV2, 16, 4)
FIELD(ID_PFR0, AMU, 20, 4)
FIELD(ID_PFR0, DIT, 24, 4)
FIELD(ID_PFR0, RAS, 28, 4)

FIELD(ID_PFR1, PROGMOD, 0, 4)
FIELD(ID_PFR1, SECURITY, 4, 4)
FIELD(ID_PFR1, MPROGMOD, 8, 4)
FIELD(ID_PFR1, VIRTUALIZATION, 12, 4)
FIELD(ID_PFR1, GENTIMER, 16, 4)
FIELD(ID_PFR1, SEC_FRAC, 20, 4)
FIELD(ID_PFR1, VIRT_FRAC, 24, 4)
FIELD(ID_PFR1, GIC, 28, 4)

FIELD(ID_PFR2, CSV3, 0, 4)
FIELD(ID_PFR2, SSBS, 4, 4)
FIELD(ID_PFR2, RAS_FRAC, 8, 4)

FIELD(ID_AA64ISAR0, AES, 4, 4)
FIELD(ID_AA64ISAR0, SHA1, 8, 4)
FIELD(ID_AA64ISAR0, SHA2, 12, 4)
FIELD(ID_AA64ISAR0, CRC32, 16, 4)
FIELD(ID_AA64ISAR0, ATOMIC, 20, 4)
FIELD(ID_AA64ISAR0, TME, 24, 4)
FIELD(ID_AA64ISAR0, RDM, 28, 4)
FIELD(ID_AA64ISAR0, SHA3, 32, 4)
FIELD(ID_AA64ISAR0, SM3, 36, 4)
FIELD(ID_AA64ISAR0, SM4, 40, 4)
FIELD(ID_AA64ISAR0, DP, 44, 4)
FIELD(ID_AA64ISAR0, FHM, 48, 4)
FIELD(ID_AA64ISAR0, TS, 52, 4)
FIELD(ID_AA64ISAR0, TLB, 56, 4)
FIELD(ID_AA64ISAR0, RNDR, 60, 4)

FIELD(ID_AA64ISAR1, DPB, 0, 4)
FIELD(ID_AA64ISAR1, APA, 4, 4)
FIELD(ID_AA64ISAR1, API, 8, 4)
FIELD(ID_AA64ISAR1, JSCVT, 12, 4)
FIELD(ID_AA64ISAR1, FCMA, 16, 4)
FIELD(ID_AA64ISAR1, LRCPC, 20, 4)
FIELD(ID_AA64ISAR1, GPA, 24, 4)
FIELD(ID_AA64ISAR1, GPI, 28, 4)
FIELD(ID_AA64ISAR1, FRINTTS, 32, 4)
FIELD(ID_AA64ISAR1, SB, 36, 4)
FIELD(ID_AA64ISAR1, SPECRES, 40, 4)
FIELD(ID_AA64ISAR1, BF16, 44, 4)
FIELD(ID_AA64ISAR1, DGH, 48, 4)
FIELD(ID_AA64ISAR1, I8MM, 52, 4)
FIELD(ID_AA64ISAR1, XS, 56, 4)
FIELD(ID_AA64ISAR1, LS64, 60, 4)

FIELD(ID_AA64ISAR2, WFXT, 0, 4)
FIELD(ID_AA64ISAR2, RPRES, 4, 4)
FIELD(ID_AA64ISAR2, GPA3, 8, 4)
FIELD(ID_AA64ISAR2, APA3, 12, 4)
FIELD(ID_AA64ISAR2, MOPS, 16, 4)
FIELD(ID_AA64ISAR2, BC, 20, 4)
FIELD(ID_AA64ISAR2, PAC_FRAC, 24, 4)
FIELD(ID_AA64ISAR2, CLRBHB, 28, 4)
FIELD(ID_AA64ISAR2, SYSREG_128, 32, 4)
FIELD(ID_AA64ISAR2, SYSINSTR_128, 36, 4)
FIELD(ID_AA64ISAR2, PRFMSLC, 40, 4)
FIELD(ID_AA64ISAR2, RPRFM, 48, 4)
FIELD(ID_AA64ISAR2, CSSC, 52, 4)
FIELD(ID_AA64ISAR2, ATS1A, 60, 4)

FIELD(ID_AA64PFR0, EL0, 0, 4)
FIELD(ID_AA64PFR0, EL1, 4, 4)
FIELD(ID_AA64PFR0, EL2, 8, 4)
FIELD(ID_AA64PFR0, EL3, 12, 4)
FIELD(ID_AA64PFR0, FP, 16, 4)
FIELD(ID_AA64PFR0, ADVSIMD, 20, 4)
FIELD(ID_AA64PFR0, GIC, 24, 4)
FIELD(ID_AA64PFR0, RAS, 28, 4)
FIELD(ID_AA64PFR0, SVE, 32, 4)
FIELD(ID_AA64PFR0, SEL2, 36, 4)
FIELD(ID_AA64PFR0, MPAM, 40, 4)
FIELD(ID_AA64PFR0, AMU, 44, 4)
FIELD(ID_AA64PFR0, DIT, 48, 4)
FIELD(ID_AA64PFR0, RME, 52, 4)
FIELD(ID_AA64PFR0, CSV2, 56, 4)
FIELD(ID_AA64PFR0, CSV3, 60, 4)

FIELD(ID_AA64PFR1, BT, 0, 4)
FIELD(ID_AA64PFR1, SSBS, 4, 4)
FIELD(ID_AA64PFR1, MTE, 8, 4)
FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4)
FIELD(ID_AA64PFR1, MPAM_FRAC, 16, 4)
FIELD(ID_AA64PFR1, SME, 24, 4)
FIELD(ID_AA64PFR1, RNDR_TRAP, 28, 4)
FIELD(ID_AA64PFR1, CSV2_FRAC, 32, 4)
FIELD(ID_AA64PFR1, NMI, 36, 4)
FIELD(ID_AA64PFR1, MTE_FRAC, 40, 4)
FIELD(ID_AA64PFR1, GCS, 44, 4)
FIELD(ID_AA64PFR1, THE, 48, 4)
FIELD(ID_AA64PFR1, MTEX, 52, 4)
FIELD(ID_AA64PFR1, DF2, 56, 4)
FIELD(ID_AA64PFR1, PFAR, 60, 4)

FIELD(ID_AA64MMFR0, PARANGE, 0, 4)
FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4)
FIELD(ID_AA64MMFR0, BIGEND, 8, 4)
FIELD(ID_AA64MMFR0, SNSMEM, 12, 4)
FIELD(ID_AA64MMFR0, BIGENDEL0, 16, 4)
FIELD(ID_AA64MMFR0, TGRAN16, 20, 4)
FIELD(ID_AA64MMFR0, TGRAN64, 24, 4)
FIELD(ID_AA64MMFR0, TGRAN4, 28, 4)
FIELD(ID_AA64MMFR0, TGRAN16_2, 32, 4)
FIELD(ID_AA64MMFR0, TGRAN64_2, 36, 4)
FIELD(ID_AA64MMFR0, TGRAN4_2, 40, 4)
FIELD(ID_AA64MMFR0, EXS, 44, 4)
FIELD(ID_AA64MMFR0, FGT, 56, 4)
FIELD(ID_AA64MMFR0, ECV, 60, 4)

FIELD(ID_AA64MMFR1, HAFDBS, 0, 4)
FIELD(ID_AA64MMFR1, VMIDBITS, 4, 4)
FIELD(ID_AA64MMFR1, VH, 8, 4)
FIELD(ID_AA64MMFR1, HPDS, 12, 4)
FIELD(ID_AA64MMFR1, LO, 16, 4)
FIELD(ID_AA64MMFR1, PAN, 20, 4)
FIELD(ID_AA64MMFR1, SPECSEI, 24, 4)
FIELD(ID_AA64MMFR1, XNX, 28, 4)
FIELD(ID_AA64MMFR1, TWED, 32, 4)
FIELD(ID_AA64MMFR1, ETS, 36, 4)
FIELD(ID_AA64MMFR1, HCX, 40, 4)
FIELD(ID_AA64MMFR1, AFP, 44, 4)
FIELD(ID_AA64MMFR1, NTLBPA, 48, 4)
FIELD(ID_AA64MMFR1, TIDCP1, 52, 4)
FIELD(ID_AA64MMFR1, CMOW, 56, 4)
FIELD(ID_AA64MMFR1, ECBHB, 60, 4)

FIELD(ID_AA64MMFR2, CNP, 0, 4)
FIELD(ID_AA64MMFR2, UAO, 4, 4)
FIELD(ID_AA64MMFR2, LSM, 8, 4)
FIELD(ID_AA64MMFR2, IESB, 12, 4)
FIELD(ID_AA64MMFR2, VARANGE, 16, 4)
FIELD(ID_AA64MMFR2, CCIDX, 20, 4)
FIELD(ID_AA64MMFR2, NV, 24, 4)
FIELD(ID_AA64MMFR2, ST, 28, 4)
FIELD(ID_AA64MMFR2, AT, 32, 4)
FIELD(ID_AA64MMFR2, IDS, 36, 4)
FIELD(ID_AA64MMFR2, FWB, 40, 4)
FIELD(ID_AA64MMFR2, TTL, 48, 4)
FIELD(ID_AA64MMFR2, BBM, 52, 4)
FIELD(ID_AA64MMFR2, EVT, 56, 4)
FIELD(ID_AA64MMFR2, E0PD, 60, 4)

FIELD(ID_AA64DFR0, DEBUGVER, 0, 4)
FIELD(ID_AA64DFR0, TRACEVER, 4, 4)
FIELD(ID_AA64DFR0, PMUVER, 8, 4)
FIELD(ID_AA64DFR0, BRPS, 12, 4)
FIELD(ID_AA64DFR0, PMSS, 16, 4)
FIELD(ID_AA64DFR0, WRPS, 20, 4)
FIELD(ID_AA64DFR0, SEBEP, 24, 4)
FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4)
FIELD(ID_AA64DFR0, PMSVER, 32, 4)
FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4)
FIELD(ID_AA64DFR0, TRACEFILT, 40, 4)
FIELD(ID_AA64DFR0, TRACEBUFFER, 44, 4)
FIELD(ID_AA64DFR0, MTPMU, 48, 4)
FIELD(ID_AA64DFR0, BRBE, 52, 4)
FIELD(ID_AA64DFR0, EXTTRCBUFF, 56, 4)
FIELD(ID_AA64DFR0, HPMN0, 60, 4)
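
/*
 * Sketch (illustrative only, not part of the QEMU API): AArch64 ID
 * register fields are 4-bit values where nonzero normally means
 * "implemented", so a feature test on a raw ID_AA64PFR0_EL1 value is a
 * single field extraction, e.g. for SVE:
 */
static inline bool aa64_pfr0_has_sve_sketch(uint64_t id_aa64pfr0)
{
    return FIELD_EX64(id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
}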

FIELD(ID_AA64ZFR0, SVEVER, 0, 4)
FIELD(ID_AA64ZFR0, AES, 4, 4)
FIELD(ID_AA64ZFR0, BITPERM, 16, 4)
FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4)
FIELD(ID_AA64ZFR0, B16B16, 24, 4)
FIELD(ID_AA64ZFR0, SHA3, 32, 4)
FIELD(ID_AA64ZFR0, SM4, 40, 4)
FIELD(ID_AA64ZFR0, I8MM, 44, 4)
FIELD(ID_AA64ZFR0, F32MM, 52, 4)
FIELD(ID_AA64ZFR0, F64MM, 56, 4)

FIELD(ID_AA64SMFR0, F32F32, 32, 1)
FIELD(ID_AA64SMFR0, BI32I32, 33, 1)
FIELD(ID_AA64SMFR0, B16F32, 34, 1)
FIELD(ID_AA64SMFR0, F16F32, 35, 1)
FIELD(ID_AA64SMFR0, I8I32, 36, 4)
FIELD(ID_AA64SMFR0, F16F16, 42, 1)
FIELD(ID_AA64SMFR0, B16B16, 43, 1)
FIELD(ID_AA64SMFR0, I16I32, 44, 4)
FIELD(ID_AA64SMFR0, F64F64, 48, 1)
FIELD(ID_AA64SMFR0, I16I64, 52, 4)
FIELD(ID_AA64SMFR0, SMEVER, 56, 4)
FIELD(ID_AA64SMFR0, FA64, 63, 1)

FIELD(ID_DFR0, COPDBG, 0, 4)
FIELD(ID_DFR0, COPSDBG, 4, 4)
FIELD(ID_DFR0, MMAPDBG, 8, 4)
FIELD(ID_DFR0, COPTRC, 12, 4)
FIELD(ID_DFR0, MMAPTRC, 16, 4)
FIELD(ID_DFR0, MPROFDBG, 20, 4)
FIELD(ID_DFR0, PERFMON, 24, 4)
FIELD(ID_DFR0, TRACEFILT, 28, 4)

FIELD(ID_DFR1, MTPMU, 0, 4)
FIELD(ID_DFR1, HPMN0, 4, 4)

FIELD(DBGDIDR, SE_IMP, 12, 1)
FIELD(DBGDIDR, NSUHD_IMP, 14, 1)
FIELD(DBGDIDR, VERSION, 16, 4)
FIELD(DBGDIDR, CTX_CMPS, 20, 4)
FIELD(DBGDIDR, BRPS, 24, 4)
FIELD(DBGDIDR, WRPS, 28, 4)

FIELD(DBGDEVID, PCSAMPLE, 0, 4)
FIELD(DBGDEVID, WPADDRMASK, 4, 4)
FIELD(DBGDEVID, BPADDRMASK, 8, 4)
FIELD(DBGDEVID, VECTORCATCH, 12, 4)
FIELD(DBGDEVID, VIRTEXTNS, 16, 4)
FIELD(DBGDEVID, DOUBLELOCK, 20, 4)
FIELD(DBGDEVID, AUXREGS, 24, 4)
FIELD(DBGDEVID, CIDMASK, 28, 4)

FIELD(MVFR0, SIMDREG, 0, 4)
FIELD(MVFR0, FPSP, 4, 4)
FIELD(MVFR0, FPDP, 8, 4)
FIELD(MVFR0, FPTRAP, 12, 4)
FIELD(MVFR0, FPDIVIDE, 16, 4)
FIELD(MVFR0, FPSQRT, 20, 4)
FIELD(MVFR0, FPSHVEC, 24, 4)
FIELD(MVFR0, FPROUND, 28, 4)

FIELD(MVFR1, FPFTZ, 0, 4)
FIELD(MVFR1, FPDNAN, 4, 4)
FIELD(MVFR1, SIMDLS, 8, 4)      /* A-profile only */
FIELD(MVFR1, SIMDINT, 12, 4)    /* A-profile only */
FIELD(MVFR1, SIMDSP, 16, 4)     /* A-profile only */
FIELD(MVFR1, SIMDHP, 20, 4)     /* A-profile only */
FIELD(MVFR1, MVE, 8, 4)         /* M-profile only */
FIELD(MVFR1, FP16, 20, 4)       /* M-profile only */
FIELD(MVFR1, FPHP, 24, 4)
FIELD(MVFR1, SIMDFMAC, 28, 4)

FIELD(MVFR2, SIMDMISC, 0, 4)
FIELD(MVFR2, FPMISC, 4, 4)

FIELD(GPCCR, PPS, 0, 3)
FIELD(GPCCR, IRGN, 8, 2)
FIELD(GPCCR, ORGN, 10, 2)
FIELD(GPCCR, SH, 12, 2)
FIELD(GPCCR, PGS, 14, 2)
FIELD(GPCCR, GPC, 16, 1)
FIELD(GPCCR, GPCP, 17, 1)
FIELD(GPCCR, L0GPTSZ, 20, 4)

FIELD(MFAR, FPA, 12, 40)
FIELD(MFAR, NSE, 62, 1)
FIELD(MFAR, NS, 63, 1)

QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);

/* If adding a feature bit which corresponds to a Linux ELF
 * HWCAP bit, remember to update the feature-bit-to-hwcap
 * mapping in linux-user/elfload.c:get_elf_hwcap().
 */
enum arm_features {
    ARM_FEATURE_AUXCR,  /* ARM1026 Auxiliary control register. */
    ARM_FEATURE_XSCALE, /* Intel XScale extensions. */
    ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. */
    ARM_FEATURE_V6,
    ARM_FEATURE_V6K,
    ARM_FEATURE_V7,
    ARM_FEATURE_THUMB2,
    ARM_FEATURE_PMSA,   /* no MMU; may have Memory Protection Unit */
    ARM_FEATURE_NEON,
    ARM_FEATURE_M,      /* Microcontroller profile. */
    ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */
    ARM_FEATURE_THUMB2EE,
    ARM_FEATURE_V7MP,   /* v7 Multiprocessing Extensions */
    ARM_FEATURE_V7VE,   /* v7 Virtualization Extensions (non-EL2 parts) */
    ARM_FEATURE_V4T,
    ARM_FEATURE_V5,
    ARM_FEATURE_STRONGARM,
    ARM_FEATURE_VAPA,   /* cp15 VA to PA lookups */
    ARM_FEATURE_GENERIC_TIMER,
    ARM_FEATURE_MVFR,   /* Media and VFP Feature Registers 0 and 1 */
    ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
    ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
    ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
    ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
    ARM_FEATURE_MPIDR,  /* has cp15 MPIDR */
    ARM_FEATURE_LPAE,   /* has Large Physical Address Extension */
    ARM_FEATURE_V8,
    ARM_FEATURE_AARCH64, /* supports 64 bit mode */
    ARM_FEATURE_CBAR,   /* has cp15 CBAR */
    ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
    ARM_FEATURE_EL2,    /* has EL2 Virtualization support */
    ARM_FEATURE_EL3,    /* has EL3 Secure monitor support */
    ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
    ARM_FEATURE_PMU,    /* has PMU support */
    ARM_FEATURE_VBAR,   /* has cp15 VBAR */
    ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
    ARM_FEATURE_M_MAIN, /* M profile Main Extension */
    ARM_FEATURE_V8_1M,  /* M profile extras only in v8.1M and later */
};

static inline int arm_feature(CPUARMState *env, int feature)
{
    return (env->features & (1ULL << feature)) != 0;
}

void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp);
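
/*
 * Usage sketch (illustrative only, not part of the QEMU API): legacy
 * and non-ID features are tested with arm_feature(), e.g. checking
 * that a CPU implements both EL2 and EL3:
 */
static inline bool cpu_has_el2_and_el3_sketch(CPUARMState *env)
{
    return arm_feature(env, ARM_FEATURE_EL2) &&
           arm_feature(env, ARM_FEATURE_EL3);
}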

/*
 * ARM v9 security states.
 * The ordering of the enumeration corresponds to the low 2 bits
 * of the GPI value, and (except for Root) the concat of NSE:NS.
 */
typedef enum ARMSecuritySpace {
    ARMSS_Secure    = 0,
    ARMSS_NonSecure = 1,
    ARMSS_Root      = 2,
    ARMSS_Realm     = 3,
} ARMSecuritySpace;

/* Return true if @space is secure, in the pre-v9 sense. */
static inline bool arm_space_is_secure(ARMSecuritySpace space)
{
    return space == ARMSS_Secure || space == ARMSS_Root;
}

/* Return the ARMSecuritySpace for @secure, assuming !RME or EL[0-2]. */
static inline ARMSecuritySpace arm_secure_to_space(bool secure)
{
    return secure ? ARMSS_Secure : ARMSS_NonSecure;
}

#if !defined(CONFIG_USER_ONLY)
/**
 * arm_security_space_below_el3:
 * @env: cpu context
 *
 * Return the security space of exception levels below EL3, following
 * an exception return to those levels. Unlike arm_security_space,
 * this doesn't care about the current EL.
 */
ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env);

/**
 * arm_is_secure_below_el3:
 * @env: cpu context
 *
 * Return true if exception levels below EL3 are in secure state,
 * or would be following an exception return to those levels.
 */
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    ARMSecuritySpace ss = arm_security_space_below_el3(env);
    return ss == ARMSS_Secure;
}

/* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
static inline bool arm_is_el3_or_mon(CPUARMState *env)
{
    assert(!arm_feature(env, ARM_FEATURE_M));
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
            /* CPU currently in AArch64 state and EL3 */
            return true;
        } else if (!is_a64(env) &&
                   (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
            /* CPU currently in AArch32 state and monitor mode */
            return true;
        }
    }
    return false;
}

/**
 * arm_security_space:
 * @env: cpu context
 *
 * Return the current security space of the cpu.
 */
ARMSecuritySpace arm_security_space(CPUARMState *env);

/**
 * arm_is_secure:
 * @env: cpu context
 *
 * Return true if the processor is in secure state.
 */
static inline bool arm_is_secure(CPUARMState *env)
{
    return arm_space_is_secure(arm_security_space(env));
}

/*
 * Return true if the current security state has AArch64 EL2 or AArch32 Hyp.
 * This corresponds to the pseudocode EL2Enabled().
 */
static inline bool arm_is_el2_enabled_secstate(CPUARMState *env,
                                               ARMSecuritySpace space)
{
    assert(space != ARMSS_Root);
    return arm_feature(env, ARM_FEATURE_EL2)
        && (space != ARMSS_Secure || (env->cp15.scr_el3 & SCR_EEL2));
}

static inline bool arm_is_el2_enabled(CPUARMState *env)
{
    return arm_is_el2_enabled_secstate(env, arm_security_space_below_el3(env));
}

#else
static inline ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
{
    return ARMSS_NonSecure;
}

static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    return false;
}

static inline ARMSecuritySpace arm_security_space(CPUARMState *env)
{
    return ARMSS_NonSecure;
}

static inline bool arm_is_secure(CPUARMState *env)
{
    return false;
}

static inline bool arm_is_el2_enabled_secstate(CPUARMState *env,
                                               ARMSecuritySpace space)
{
    return false;
}

static inline bool arm_is_el2_enabled(CPUARMState *env)
{
    return false;
}
#endif

/**
 * arm_hcr_el2_eff(): Return the effective value of HCR_EL2.
 * E.g. when in secure state, fields in HCR_EL2 are suppressed,
 * "for all purposes other than a direct read or write access of HCR_EL2."
 * Not included here is HCR_RW.
 */
uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space);
uint64_t arm_hcr_el2_eff(CPUARMState *env);
uint64_t arm_hcrx_el2_eff(CPUARMState *env);
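
/*
 * Sketch (illustrative only, not part of the QEMU API): trap and
 * routing decisions should consult the *effective* HCR_EL2 value via
 * the accessor above rather than env->cp15.hcr_el2 directly, since
 * fields are suppressed in some security states; e.g. "are physical
 * IRQs routed to EL2?":
 */
static inline bool irqs_routed_to_el2_sketch(CPUARMState *env)
{
    return (arm_hcr_el2_eff(env) & HCR_IMO) != 0;
}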

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /* The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == 3) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) {
        aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_is_el2_enabled(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}

/* Function for determining whether guest cp register reads and writes should
 * access the secure or non-secure bank of a cp register. When EL3 is
 * operating in AArch32 state, the NS-bit determines whether the secure
 * instance of a cp register should be used. When EL3 is AArch64 (or if
 * it doesn't exist at all) then there is no register banking, and all
 * accesses are to the non-secure version.
 */
static inline bool access_secure_reg(CPUARMState *env)
{
    bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
                !arm_el_is_aa64(env, 3) &&
                !(env->cp15.scr_el3 & SCR_NS));

    return ret;
}

/* Macros for accessing a specified CP register bank */
#define A32_BANKED_REG_GET(_env, _regname, _secure)    \
    ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)

#define A32_BANKED_REG_SET(_env, _regname, _secure, _val)   \
    do {                                                    \
        if (_secure) {                                      \
            (_env)->cp15._regname##_s = (_val);             \
        } else {                                            \
            (_env)->cp15._regname##_ns = (_val);            \
        }                                                   \
    } while (0)

/* Macros for automatically accessing a specific CP register bank depending on
 * the current secure state of the system. These macros are not intended for
 * supporting instruction translation reads/writes as these are dependent
 * solely on the SCR.NS bit and not the mode.
 */
#define A32_BANKED_CURRENT_REG_GET(_env, _regname)  \
    A32_BANKED_REG_GET((_env), _regname,            \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))

#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val)                    \
    A32_BANKED_REG_SET((_env), _regname,                                    \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
                       (_val))

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure);

/* Return the highest implemented Exception Level */
static inline int arm_highest_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        return 3;
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        return 2;
    }
    return 1;
}

/* Return true if a v7M CPU is in Handler mode */
static inline bool arm_v7m_is_handler_mode(CPUARMState *env)
{
    return env->v7m.exception != 0;
}
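
/*
 * Usage sketch for the banked-register macros above (illustrative only;
 * "somereg" is a hypothetical stand-in for any cp15 field that has _s
 * and _ns instances):
 *
 *   uint32_t v = A32_BANKED_CURRENT_REG_GET(env, somereg);
 *   A32_BANKED_CURRENT_REG_SET(env, somereg, v | 1);
 *
 * The _CURRENT_ variants pick the secure bank only when the CPU is in
 * secure state with an AArch32 EL3, matching the banking rules above.
 */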

/* Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[env->v7m.secure] & 1);
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in
             * EL3
             */
            return 3;
        }

        return 1;
    }
}

/**
 * write_list_to_cpustate:
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the cpreg_values list into the CPUARMState structure.
 * This updates TCG's working data structures from KVM data or
 * from incoming migration state.
 *
 * Returns: true if all register values were updated correctly,
 * false if some register was unknown or could not be written.
 * Note that we do not stop early on failure -- we will attempt
 * writing all registers in the list.
 */
bool write_list_to_cpustate(ARMCPU *cpu);

/**
 * write_cpustate_to_list:
 * @cpu: ARMCPU
 * @kvm_sync: true if this is for syncing back to KVM
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the CPUARMState structure into the cpreg_values list.
 * This is used to copy info from TCG's working data structures into
 * KVM or for outbound migration.
 *
 * @kvm_sync is true if we are doing this in order to sync the
 * register state back to KVM. In this case we will only update
 * values in the list if the previous list->cpustate sync actually
 * successfully wrote the CPU state. Otherwise we will keep the value
 * that is in the list.
 *
 * Returns: true if all register values were read correctly,
 * false if some register was unknown or could not be read.
 * Note that we do not stop early on failure -- we will attempt
 * reading all registers in the list.
 */
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);

#define ARM_CPUID_TI915T      0x54029152
#define ARM_CPUID_TI925T      0x54029252

#define CPU_RESOLVING_TYPE TYPE_ARM_CPU

#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
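
/*
 * Usage sketch (illustrative only, not part of the QEMU API): privilege
 * checks typically branch on arm_current_el() (defined above) rather
 * than decoding PSTATE or CPSR by hand:
 */
static inline bool cpu_is_privileged_sketch(CPUARMState *env)
{
    return arm_current_el(env) != 0;
}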

/* ARM has the following "translation regimes" (as the ARM ARM calls them):
 *
 * If EL3 is 64-bit:
 *  + NonSecure EL1 & 0 stage 1
 *  + NonSecure EL1 & 0 stage 2
 *  + NonSecure EL2
 *  + NonSecure EL2 & 0 (ARMv8.1-VHE)
 *  + Secure EL1 & 0
 *  + Secure EL3
 * If EL3 is 32-bit:
 *  + NonSecure PL1 & 0 stage 1
 *  + NonSecure PL1 & 0 stage 2
 *  + NonSecure PL2
 *  + Secure PL0
 *  + Secure PL1
 * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
 *
 * For QEMU, an mmu_idx is not quite the same as a translation regime because:
 *  1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
 *     because they may differ in access permissions even if the VA->PA map is
 *     the same
 *  2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
 *     translation, which means that we have one mmu_idx that deals with two
 *     concatenated translation regimes [this sort of combined s1+2 TLB is
 *     architecturally permitted]
 *  3. we don't need to allocate an mmu_idx to translations that we won't be
 *     handling via the TLB. The only way to do a stage 1 translation without
 *     the immediate stage 2 translation is via the ATS or AT system insns,
 *     which can be slow-pathed and always do a page table walk.
 *     The only use of stage 2 translations is either as part of an s1+2
 *     lookup or when loading the descriptors during a stage 1 page table walk,
 *     and in both those cases we don't use the TLB.
 *  4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
 *     translation regimes, because they map reasonably well to each other
 *     and they can't both be active at the same time.
 *  5. we want to be able to use the TLB for accesses done as part of a
 *     stage1 page table walk, rather than having to walk the stage2 page
 *     table over and over.
 *  6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
 *     Never (PAN) bit within PSTATE.
 *  7. we fold together the secure and non-secure regimes for A-profile,
 *     because there are no banked system registers for aarch64, so the
 *     process of switching between secure and non-secure is
 *     already heavyweight.
 *
 * This gives us the following list of cases:
 *
 * EL0 EL1&0 stage 1+2 (aka NS PL0)
 * EL1 EL1&0 stage 1+2 (aka NS PL1)
 * EL1 EL1&0 stage 1+2 +PAN
 * EL0 EL2&0
 * EL2 EL2&0
 * EL2 EL2&0 +PAN
 * EL2 (aka NS PL2)
 * EL3 (aka S PL1)
 * Physical (S, NS, Root, Realm)
 * Stage2 (S, NS)
 *
 * for a total of 14 different mmu_idx.
 *
 * R profile CPUs have an MPU, but can use the same set of MMU indexes
 * as A profile. They only need to distinguish EL0 and EL1 (and
 * EL2 if we ever model a Cortex-R52).
 *
 * M profile CPUs are rather different as they do not have a true MMU.
 * They have the following different MMU indexes:
 *  User
 *  Privileged
 *  User, execution priority negative (ie the MPU HFNMIENA bit may apply)
 *  Privileged, execution priority negative (ditto)
 * If the CPU supports the v8M Security Extension then there are also:
 *  Secure User
 *  Secure Privileged
 *  Secure User, execution priority negative
 *  Secure Privileged, execution priority negative
 *
 * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
 * are not quite the same -- different CPU types (most notably M profile
 * vs A/R profile) would like to use MMU indexes with different semantics,
 * but since we don't ever need to use all of those in a single CPU we
 * can avoid having to set NB_MMU_MODES to "total number of A profile MMU
 * modes + total number of M profile MMU modes". The lower bits of
 * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
 * the same for any particular CPU.
 * Variables of type ARMMMUIdx are always full values, and the core
 * index values are in variables of type 'int'.
 *
 * Our enumeration includes at the end some entries which are not "true"
 * mmu_idx values in that they don't have corresponding TLBs and are only
 * valid for doing slow path page table walks.
 *
 * The constant names here are patterned after the general style of the names
 * of the AT/ATS operations.
 * The values used are carefully arranged to make mmu_idx => EL lookup easy.
 * For M profile we arrange them to have a bit for priv, a bit for negpri
 * and a bit for secure.
 */
#define ARM_MMU_IDX_A     0x10  /* A profile */
#define ARM_MMU_IDX_NOTLB 0x20  /* does not have a TLB */
#define ARM_MMU_IDX_M     0x40  /* M profile */

/* Meanings of the bits for M profile mmu idx values */
#define ARM_MMU_IDX_M_PRIV   0x1
#define ARM_MMU_IDX_M_NEGPRI 0x2
#define ARM_MMU_IDX_M_S      0x4  /* Secure */

#define ARM_MMU_IDX_TYPE_MASK \
    (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
#define ARM_MMU_IDX_COREIDX_MASK 0xf

typedef enum ARMMMUIdx {
    /*
     * A-profile.
     */
    ARMMMUIdx_E10_0     = 0 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_0     = 1 | ARM_MMU_IDX_A,
    ARMMMUIdx_E10_1     = 2 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_2     = 3 | ARM_MMU_IDX_A,
    ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A,
    ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
    ARMMMUIdx_E2        = 6 | ARM_MMU_IDX_A,
    ARMMMUIdx_E3        = 7 | ARM_MMU_IDX_A,

    /*
     * Used for second stage of an S12 page table walk, or for descriptor
     * loads during first stage of an S1 page table walk. Note that both
     * are in use simultaneously for SecureEL2: the security state for
     * the S2 ptw is selected by the NS bit from the S1 ptw.
     */
    ARMMMUIdx_Stage2_S  = 8 | ARM_MMU_IDX_A,
    ARMMMUIdx_Stage2    = 9 | ARM_MMU_IDX_A,

    /* TLBs with 1-1 mapping to the physical address spaces. */
    ARMMMUIdx_Phys_S     = 10 | ARM_MMU_IDX_A,
    ARMMMUIdx_Phys_NS    = 11 | ARM_MMU_IDX_A,
    ARMMMUIdx_Phys_Root  = 12 | ARM_MMU_IDX_A,
    ARMMMUIdx_Phys_Realm = 13 | ARM_MMU_IDX_A,

    /*
     * These are not allocated TLBs and are used only for AT system
     * instructions or for the first stage of an S12 page table walk.
     */
    ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,

    /*
     * M-profile.
     */
    ARMMMUIdx_MUser = ARM_MMU_IDX_M,
    ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
    ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
    ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
    ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
    ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
    ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
    ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
} ARMMMUIdx;
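
/*
 * Sketch (illustrative only, not part of the QEMU API): the low bits of
 * an ARMMMUIdx value are the core TLB index, and the type bits
 * distinguish A-profile, M-profile and no-TLB values, so extracting the
 * core index is a plain mask:
 */
static inline int mmu_idx_to_core_sketch(ARMMMUIdx idx)
{
    return idx & ARM_MMU_IDX_COREIDX_MASK;
}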

/*
 * Bit macros for the core-mmu-index values for each index,
 * for use when calling tlb_flush_by_mmuidx() and friends.
 */
#define TO_CORE_BIT(NAME) \
    ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)

typedef enum ARMMMUIdxBit {
    TO_CORE_BIT(E10_0),
    TO_CORE_BIT(E20_0),
    TO_CORE_BIT(E10_1),
    TO_CORE_BIT(E10_1_PAN),
    TO_CORE_BIT(E2),
    TO_CORE_BIT(E20_2),
    TO_CORE_BIT(E20_2_PAN),
    TO_CORE_BIT(E3),
    TO_CORE_BIT(Stage2),
    TO_CORE_BIT(Stage2_S),

    TO_CORE_BIT(MUser),
    TO_CORE_BIT(MPriv),
    TO_CORE_BIT(MUserNegPri),
    TO_CORE_BIT(MPrivNegPri),
    TO_CORE_BIT(MSUser),
    TO_CORE_BIT(MSPriv),
    TO_CORE_BIT(MSUserNegPri),
    TO_CORE_BIT(MSPrivNegPri),
} ARMMMUIdxBit;

#undef TO_CORE_BIT

#define MMU_USER_IDX 0

/* Indexes used when registering address spaces with cpu_address_space_init */
typedef enum ARMASIdx {
    ARMASIdx_NS = 0,
    ARMASIdx_S = 1,
    ARMASIdx_TagNS = 2,
    ARMASIdx_TagS = 3,
} ARMASIdx;

static inline ARMMMUIdx arm_space_to_phys(ARMSecuritySpace space)
{
    /* Assert the relative order of the physical mmu indexes. */
    QEMU_BUILD_BUG_ON(ARMSS_Secure != 0);
    QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS != ARMMMUIdx_Phys_S + ARMSS_NonSecure);
    QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_Root != ARMMMUIdx_Phys_S + ARMSS_Root);
    QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_Realm != ARMMMUIdx_Phys_S + ARMSS_Realm);

    return ARMMMUIdx_Phys_S + space;
}

static inline ARMSecuritySpace arm_phys_to_space(ARMMMUIdx idx)
{
    assert(idx >= ARMMMUIdx_Phys_S && idx <= ARMMMUIdx_Phys_Realm);
    return idx - ARMMMUIdx_Phys_S;
}

static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu)
{
    /* If all the CLIDR.Ctypem bits are 0 there are no caches, and
     * CSSELR is RAZ/WI.
     */
    return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
}

static inline bool arm_sctlr_b(CPUARMState *env)
{
    return
        /* We need not implement SCTLR.ITD in user-mode emulation, so
         * let linux-user ignore the fact that it conflicts with SCTLR_B.
         * This lets people run BE32 binaries with "-cpu any".
         */
#ifndef CONFIG_USER_ONLY
        !arm_feature(env, ARM_FEATURE_V7) &&
#endif
        (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
}

uint64_t arm_sctlr(CPUARMState *env, int el);

static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
                                                  bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /*
     * In system mode, BE32 is modelled in line with the
     * architecture (as word-invariant big-endianness), where loads
     * and stores are done little endian but from addresses which
     * are adjusted by XORing with the appropriate constant. So the
     * endianness to use for the raw data access is not affected by
     * SCTLR.B.
     * In user mode, however, we model BE32 as byte-invariant
     * big-endianness (because user-only code cannot tell the
     * difference), and so we need to use a data access endianness
     * that depends on SCTLR.B.
     */
    if (sctlr_b) {
        return true;
    }
#endif
    /* In 32bit endianness is determined by looking at CPSR's E bit */
    return env->uncached_cpsr & CPSR_E;
}

static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
{
    return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
}
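
/*
 * Sketch (illustrative only, not part of the QEMU API): for AArch64 the
 * data endianness at a given EL depends only on SCTLR_ELx.EE (or
 * SCTLR_EL1.E0E for EL0), so e.g. for EL1:
 */
static inline bool el1_data_is_be_sketch(CPUARMState *env)
{
    return arm_cpu_data_is_big_endian_a64(1, arm_sctlr(env, 1));
}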

/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
    if (!is_a64(env)) {
        return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
    } else {
        int cur_el = arm_current_el(env);
        uint64_t sctlr = arm_sctlr(env, cur_el);
        return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
    }
}

#include "exec/cpu-all.h"

/*
 * We have more than 32-bits worth of state per TB, so we split the data
 * between tb->flags and tb->cs_base, which is otherwise unused for ARM.
 * We collect these two parts in CPUARMTBFlags where they are named
 * flags and flags2 respectively.
 *
 * The flags that are shared between all execution modes, TBFLAG_ANY,
 * are stored in flags. The flags that are specific to a given mode
 * are stored in flags2. Since cs_base is sized on the configured
 * address size, flags2 always has 64-bits for A64, and a minimum of
 * 32-bits for A32 and M32.
 *
 * The bits for 32-bit A-profile and M-profile partially overlap:
 *
 *  31         23         11 10             0
 * +-------------+----------+----------------+
 * |             |          |   TBFLAG_A32   |
 * | TBFLAG_AM32 |          +-----+----------+
 * |             |                |TBFLAG_M32|
 * +-------------+----------------+----------+
 *  31         23                6 5         0
 *
 * Unless otherwise noted, these bits are cached in env->hflags.
 */
FIELD(TBFLAG_ANY, AARCH64_STATE, 0, 1)
FIELD(TBFLAG_ANY, SS_ACTIVE, 1, 1)
FIELD(TBFLAG_ANY, PSTATE__SS, 2, 1)      /* Not cached. */
FIELD(TBFLAG_ANY, BE_DATA, 3, 1)
FIELD(TBFLAG_ANY, MMUIDX, 4, 4)
/* Target EL if we take a floating-point-disabled exception */
FIELD(TBFLAG_ANY, FPEXC_EL, 8, 2)
/* Memory operations require alignment: SCTLR_ELx.A or CCR.UNALIGN_TRP */
FIELD(TBFLAG_ANY, ALIGN_MEM, 10, 1)
FIELD(TBFLAG_ANY, PSTATE__IL, 11, 1)
FIELD(TBFLAG_ANY, FGT_ACTIVE, 12, 1)
FIELD(TBFLAG_ANY, FGT_SVC, 13, 1)

/*
 * Bit usage when in AArch32 state, both A- and M-profile.
 */
FIELD(TBFLAG_AM32, CONDEXEC, 24, 8)      /* Not cached. */
FIELD(TBFLAG_AM32, THUMB, 23, 1)         /* Not cached. */

/*
 * Bit usage when in AArch32 state, for A-profile only.
 */
FIELD(TBFLAG_A32, VECLEN, 0, 3)          /* Not cached. */
FIELD(TBFLAG_A32, VECSTRIDE, 3, 2)       /* Not cached. */
/*
 * We store the bottom two bits of the CPAR as TB flags and handle
 * checks on the other bits at runtime. This shares the same bits as
 * VECSTRIDE, which is OK as no XScale CPU has VFP.
 * Not cached, because VECLEN+VECSTRIDE are not cached.
 */
FIELD(TBFLAG_A32, XSCALE_CPAR, 5, 2)
FIELD(TBFLAG_A32, VFPEN, 7, 1)           /* Partially cached, minus FPEXC. */
FIELD(TBFLAG_A32, SCTLR__B, 8, 1)        /* Cannot overlap with SCTLR_B */
FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
/*
 * Indicates whether cp register reads and writes by guest code should access
 * the secure or nonsecure bank of banked registers; note that this is not
 * the same thing as the current security state of the processor!
 */
FIELD(TBFLAG_A32, NS, 10, 1)
/*
 * Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not.
 * This requires an SME trap from AArch32 mode when using NEON.
 */
FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1)

/*
 * Bit usage when in AArch32 state, for M-profile only.
 */
/* Handler (ie not Thread) mode */
FIELD(TBFLAG_M32, HANDLER, 0, 1)
/* Whether we should generate stack-limit checks */
FIELD(TBFLAG_M32, STACKCHECK, 1, 1)
/* Set if FPCCR.LSPACT is set */
FIELD(TBFLAG_M32, LSPACT, 2, 1)                 /* Not cached. */
/* Set if we must create a new FP context */
FIELD(TBFLAG_M32, NEW_FP_CTXT_NEEDED, 3, 1)     /* Not cached. */
/* Set if FPCCR.S does not match current security state */
FIELD(TBFLAG_M32, FPCCR_S_WRONG, 4, 1)          /* Not cached. */
/* Set if MVE insns are definitely not predicated by VPR or LTPSIZE */
FIELD(TBFLAG_M32, MVE_NO_PRED, 5, 1)            /* Not cached. */
/* Set if in secure mode */
FIELD(TBFLAG_M32, SECURE, 6, 1)

/*
 * Bit usage when in AArch64 state
 */
FIELD(TBFLAG_A64, TBII, 0, 2)
FIELD(TBFLAG_A64, SVEEXC_EL, 2, 2)
/* The current vector length, either NVL or SVL. */
FIELD(TBFLAG_A64, VL, 4, 4)
FIELD(TBFLAG_A64, PAUTH_ACTIVE, 8, 1)
FIELD(TBFLAG_A64, BT, 9, 1)
FIELD(TBFLAG_A64, BTYPE, 10, 2)         /* Not cached. */
FIELD(TBFLAG_A64, TBID, 12, 2)
FIELD(TBFLAG_A64, UNPRIV, 14, 1)
FIELD(TBFLAG_A64, ATA, 15, 1)
FIELD(TBFLAG_A64, TCMA, 16, 2)
FIELD(TBFLAG_A64, MTE_ACTIVE, 18, 1)
FIELD(TBFLAG_A64, MTE0_ACTIVE, 19, 1)
FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2)
FIELD(TBFLAG_A64, PSTATE_SM, 22, 1)
FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1)
FIELD(TBFLAG_A64, SVL, 24, 4)
/* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */
FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1)
FIELD(TBFLAG_A64, TRAP_ERET, 29, 1)
FIELD(TBFLAG_A64, NAA, 30, 1)
FIELD(TBFLAG_A64, ATA0, 31, 1)
FIELD(TBFLAG_A64, NV, 32, 1)
FIELD(TBFLAG_A64, NV1, 33, 1)
FIELD(TBFLAG_A64, NV2, 34, 1)
/* Set if FEAT_NV2 RAM accesses use the EL2&0 translation regime */
FIELD(TBFLAG_A64, NV2_MEM_E20, 35, 1)
/* Set if FEAT_NV2 RAM accesses are big-endian */
FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1)

/*
 * Helpers for using the above. Note that only the A64 accessors use
 * FIELD_DP64() and FIELD_EX64(), because in the other cases the flags
 * word either is or might be 32 bits only.
 */
#define DP_TBFLAG_ANY(DST, WHICH, VAL) \
    (DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL))
#define DP_TBFLAG_A64(DST, WHICH, VAL) \
    (DST.flags2 = FIELD_DP64(DST.flags2, TBFLAG_A64, WHICH, VAL))
#define DP_TBFLAG_A32(DST, WHICH, VAL) \
    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A32, WHICH, VAL))
#define DP_TBFLAG_M32(DST, WHICH, VAL) \
    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_M32, WHICH, VAL))
#define DP_TBFLAG_AM32(DST, WHICH, VAL) \
    (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_AM32, WHICH, VAL))

#define EX_TBFLAG_ANY(IN, WHICH)   FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH)
#define EX_TBFLAG_A64(IN, WHICH)   FIELD_EX64(IN.flags2, TBFLAG_A64, WHICH)
#define EX_TBFLAG_A32(IN, WHICH)   FIELD_EX32(IN.flags2, TBFLAG_A32, WHICH)
#define EX_TBFLAG_M32(IN, WHICH)   FIELD_EX32(IN.flags2, TBFLAG_M32, WHICH)
#define EX_TBFLAG_AM32(IN, WHICH)  FIELD_EX32(IN.flags2, TBFLAG_AM32, WHICH)
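
/*
 * Usage sketch (illustrative only, not part of the QEMU API): a round
 * trip through the helper macros above; MMUIDX lives in the TBFLAG_ANY
 * (flags) word of the CPUARMTBFlags container:
 */
static inline uint32_t tbflags_roundtrip_sketch(CPUARMTBFlags f)
{
    DP_TBFLAG_ANY(f, MMUIDX, 3);
    return EX_TBFLAG_ANY(f, MMUIDX);   /* yields 3 */
}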

/**
 * sve_vq
 * @env: the cpu context
 *
 * Return the VL cached within env->hflags, in units of quadwords.
 */
static inline int sve_vq(CPUARMState *env)
{
    return EX_TBFLAG_A64(env->hflags, VL) + 1;
}

/**
 * sme_vq
 * @env: the cpu context
 *
 * Return the SVL cached within env->hflags, in units of quadwords.
 */
static inline int sme_vq(CPUARMState *env)
{
    return EX_TBFLAG_A64(env->hflags, SVL) + 1;
}

static inline bool bswap_code(bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /* BE8 (SCTLR.B = 0, TARGET_BIG_ENDIAN = 1) is mixed endian.
     * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_BIG_ENDIAN=0
     * would also end up as a mixed-endian mode with BE code, LE data.
     */
    return TARGET_BIG_ENDIAN ^ sctlr_b;
#else
    /* All code access in ARM is little endian, and there are no loaders
     * doing swaps that need to be reversed
     */
    return 0;
#endif
}

#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
}
#endif

void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
                          uint64_t *cs_base, uint32_t *flags);

enum {
    QEMU_PSCI_CONDUIT_DISABLED = 0,
    QEMU_PSCI_CONDUIT_SMC = 1,
    QEMU_PSCI_CONDUIT_HVC = 2,
};

#ifndef CONFIG_USER_ONLY
/* Return the address space index to use for a memory access */
static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
{
    return attrs.secure ? ARMASIdx_S : ARMASIdx_NS;
}

/* Return the AddressSpace to use for a memory access
 * (which depends on whether the access is S or NS, and whether
 * the board gave us a separate AddressSpace for S accesses).
 */
static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
{
    return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs));
}
#endif

/**
 * arm_register_pre_el_change_hook:
 * Register a hook function which will be called immediately before this
 * CPU changes exception level or mode. The hook function will be
 * passed a pointer to the ARMCPU and the opaque data pointer passed
 * to this function when the hook was registered.
 *
 * Note that if a pre-change hook is called, any registered post-change hooks
 * are guaranteed to subsequently be called.
 */
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                     void *opaque);
/**
 * arm_register_el_change_hook:
 * Register a hook function which will be called immediately after this
 * CPU changes exception level or mode. The hook function will be
 * passed a pointer to the ARMCPU and the opaque data pointer passed
 * to this function when the hook was registered.
 *
 * Note that hooks registered here are guaranteed to be called if any
 * pre-change hooks have been.
 */
void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque);

/**
 * arm_rebuild_hflags:
 * Rebuild the cached TBFLAGS for arbitrary changed processor state.
 */
void arm_rebuild_hflags(CPUARMState *env);
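
/*
 * Usage sketch (illustrative only, not part of the QEMU API): converting
 * the cached vector length from sve_vq() (quadwords) into bytes:
 */
static inline int sve_vl_bytes_sketch(CPUARMState *env)
{
    return sve_vq(env) * 16;    /* 1 quadword == 128 bits == 16 bytes */
}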

/**
 * aa32_vfp_dreg:
 * Return a pointer to the Dn register within env in 32-bit mode.
 */
static inline uint64_t *aa32_vfp_dreg(CPUARMState *env, unsigned regno)
{
    return &env->vfp.zregs[regno >> 1].d[regno & 1];
}

/**
 * aa32_vfp_qreg:
 * Return a pointer to the Qn register within env in 32-bit mode.
 */
static inline uint64_t *aa32_vfp_qreg(CPUARMState *env, unsigned regno)
{
    return &env->vfp.zregs[regno].d[0];
}

/**
 * aa64_vfp_qreg:
 * Return a pointer to the Qn register within env in 64-bit mode.
 */
static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
{
    return &env->vfp.zregs[regno].d[0];
}

/* Shared between translate-sve.c and sve_helper.c. */
extern const uint64_t pred_esz_masks[5];

/*
 * AArch64 usage of the PAGE_TARGET_* bits for linux-user.
 * Note that with the Linux kernel, PROT_MTE may not be cleared by
 * mprotect but PROT_BTI may be cleared. C.f. the kernel's VM_ARCH_CLEAR.
 */
#define PAGE_BTI            PAGE_TARGET_1
#define PAGE_MTE            PAGE_TARGET_2
#define PAGE_TARGET_STICKY  PAGE_MTE

/* We associate one allocation tag per 16 bytes, the minimum. */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)

#ifdef CONFIG_USER_ONLY
#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1))
#endif

#ifdef TARGET_TAGGED_ADDRESSES
/**
 * cpu_untagged_addr:
 * @cs: CPU context
 * @x: tagged address
 *
 * Remove any address tag from @x. This is explicitly related to the
 * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
 *
 * There should be a better place to put this, but we need this in
 * include/exec/cpu_ldst.h, and not some place linux-user specific.
 */
static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x)
{
    ARMCPU *cpu = ARM_CPU(cs);
    if (cpu->env.tagged_addr_enable) {
        /*
         * TBI is enabled for userspace but not kernelspace addresses.
         * Only clear the tag if bit 55 is clear.
         */
        x &= sextract64(x, 0, 56);
    }
    return x;
}
#endif

#endif