/*
 * MIPS internal definitions and helpers
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef MIPS_INTERNAL_H
#define MIPS_INTERNAL_H

#include "exec/memattrs.h"
#ifdef CONFIG_TCG
#include "tcg/tcg-internal.h"
#endif
#include "cpu.h"

/*
 * MMU types, the first four entries have the same layout as the
 * CP0C0_MT field.
 */
enum mips_mmu_types {
    MMU_TYPE_NONE  = 0,
    MMU_TYPE_R4000 = 1, /* Standard TLB */
    MMU_TYPE_BAT   = 2, /* Block Address Translation */
    MMU_TYPE_FMT   = 3, /* Fixed Mapping */
    MMU_TYPE_DVF   = 4, /* Dual VTLB and FTLB */
    MMU_TYPE_R3000,
    MMU_TYPE_R6000,
    MMU_TYPE_R8000
};

struct mips_def_t {
    const char *name;
    int32_t CP0_PRid;
    int32_t CP0_Config0;
    int32_t CP0_Config1;
    int32_t CP0_Config2;
    int32_t CP0_Config3;
    int32_t CP0_Config4;
    int32_t CP0_Config4_rw_bitmask;
    int32_t CP0_Config5;
    int32_t CP0_Config5_rw_bitmask;
    int32_t CP0_Config6;
    int32_t CP0_Config6_rw_bitmask;
    int32_t CP0_Config7;
    int32_t CP0_Config7_rw_bitmask;
    target_ulong CP0_LLAddr_rw_bitmask;
    int CP0_LLAddr_shift;
    int32_t SYNCI_Step;
    /*
     * @CCRes: rate at which the coprocessor 0 counter increments
     *
     * The Count register acts as a timer, incrementing at a constant rate,
     * whether or not an instruction is executed, retired, or any forward
     * progress is made through the pipeline. The rate at which the counter
     * increments is implementation dependent, and is a function of the
     * pipeline clock of the processor, not the issue width of the processor.
     */
    int32_t CCRes;
    int32_t CP0_Status_rw_bitmask;
    int32_t CP0_TCStatus_rw_bitmask;
    int32_t CP0_SRSCtl;
    int32_t CP1_fcr0;
    int32_t CP1_fcr31_rw_bitmask;
    int32_t CP1_fcr31;
    int32_t MSAIR;
    int32_t SEGBITS;
    int32_t PABITS;
    int32_t CP0_SRSConf0_rw_bitmask;
    int32_t CP0_SRSConf0;
    int32_t CP0_SRSConf1_rw_bitmask;
    int32_t CP0_SRSConf1;
    int32_t CP0_SRSConf2_rw_bitmask;
    int32_t CP0_SRSConf2;
    int32_t CP0_SRSConf3_rw_bitmask;
    int32_t CP0_SRSConf3;
    int32_t CP0_SRSConf4_rw_bitmask;
    int32_t CP0_SRSConf4;
    int32_t CP0_PageGrain_rw_bitmask;
    int32_t CP0_PageGrain;
    target_ulong CP0_EBaseWG_rw_bitmask;
    uint32_t lcsr_cpucfg1;
    uint32_t lcsr_cpucfg2;
    uint64_t insn_flags;
    enum mips_mmu_types mmu_type;
};

extern const char regnames[32][3];
extern const char fregnames[32][4];

extern const struct mips_def_t mips_defs[];
extern const int mips_defs_number;

int mips_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

#define USEG_LIMIT      ((target_ulong)(int32_t)0x7FFFFFFFUL)
#define KSEG0_BASE      ((target_ulong)(int32_t)0x80000000UL)
#define KSEG1_BASE      ((target_ulong)(int32_t)0xA0000000UL)
#define KSEG2_BASE      ((target_ulong)(int32_t)0xC0000000UL)
#define KSEG3_BASE      ((target_ulong)(int32_t)0xE0000000UL)

#if !defined(CONFIG_USER_ONLY)

enum {
    TLBRET_XI = -6,
    TLBRET_RI = -5,
    TLBRET_DIRTY = -4,
    TLBRET_INVALID = -3,
    TLBRET_NOMATCH = -2,
    TLBRET_BADADDR = -1,
    TLBRET_MATCH = 0
};

int get_physical_address(CPUMIPSState *env, hwaddr *physical,
                         int *prot, target_ulong real_address,
                         MMUAccessType access_type, int mmu_idx);
hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
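
/*
 * Illustrative sketch for get_physical_address() above (not an excerpt
 * from QEMU): a caller translating a guest virtual address passes the
 * access type and MMU index in use, and turns any non-zero TLBRET_* code
 * into the matching refill, invalid, modified or address-error exception,
 * roughly:
 *
 *     hwaddr phys;
 *     int prot;
 *     int ret = get_physical_address(env, &phys, &prot, address,
 *                                    MMU_DATA_LOAD, mmu_idx);
 *     if (ret != TLBRET_MATCH) {
 *         ... raise the exception selected by ret ...
 *     }
 */
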
typedef struct r4k_tlb_t r4k_tlb_t;
struct r4k_tlb_t {
    target_ulong VPN;
    uint32_t PageMask;
    uint16_t ASID;
    uint32_t MMID;
    unsigned int G:1;
    unsigned int C0:3;
    unsigned int C1:3;
    unsigned int V0:1;
    unsigned int V1:1;
    unsigned int D0:1;
    unsigned int D1:1;
    unsigned int XI0:1;
    unsigned int XI1:1;
    unsigned int RI0:1;
    unsigned int RI1:1;
    unsigned int EHINV:1;
    uint64_t PFN[2];
};

struct CPUMIPSTLBContext {
    uint32_t nb_tlb;
    uint32_t tlb_in_use;
    int (*map_address)(CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, MMUAccessType access_type);
    void (*helper_tlbwi)(CPUMIPSState *env);
    void (*helper_tlbwr)(CPUMIPSState *env);
    void (*helper_tlbp)(CPUMIPSState *env);
    void (*helper_tlbr)(CPUMIPSState *env);
    void (*helper_tlbinv)(CPUMIPSState *env);
    void (*helper_tlbinvf)(CPUMIPSState *env);
    union {
        struct {
            r4k_tlb_t tlb[MIPS_TLB_MAX];
        } r4k;
    } mmu;
};

void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);

extern const VMStateDescription vmstate_mips_cpu;

#endif /* !CONFIG_USER_ONLY */

static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
{
    return (env->CP0_Status & (1 << CP0St_IE)) &&
        !(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        /*
         * Note that the TCStatus IXMT field is initialized to zero,
         * and only MT capable cores can set it to one. So we don't
         * need to check for MT capabilities here.
         */
        !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT));
}

/* Check whether there is a pending, unmasked interrupt */
static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
{
    int32_t pending;
    int32_t status;
    bool r;

    pending = env->CP0_Cause & CP0Ca_IP_mask;
    status = env->CP0_Status & CP0Ca_IP_mask;

    if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
        /*
         * A MIPS configured with a vectorizing external interrupt controller
         * will feed a vector into the Cause pending lines. The core treats
         * the status lines as a vector level, not as individual masks.
         */
        r = pending > status;
    } else {
        /*
         * A MIPS configured with compatibility or VInt (Vectored Interrupts)
         * treats the pending lines as individual interrupt lines, the status
         * lines are individual masks.
         */
        r = (pending & status) != 0;
    }
    return r;
}
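
/*
 * Illustrative sketch (assumption, not code lifted from the interrupt
 * delivery path): the two predicates above are meant to be combined when
 * deciding whether a hardware interrupt can actually be taken, roughly:
 *
 *     if (cpu_mips_hw_interrupts_enabled(env) &&
 *         cpu_mips_hw_interrupts_pending(env)) {
 *         ... deliver the external interrupt exception ...
 *     }
 */
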
void msa_reset(CPUMIPSState *env);

/* cp0_timer.c */
uint32_t cpu_mips_get_count(CPUMIPSState *env);
void cpu_mips_store_count(CPUMIPSState *env, uint32_t value);
void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value);
void cpu_mips_start_count(CPUMIPSState *env);
void cpu_mips_stop_count(CPUMIPSState *env);

static inline void mips_env_set_pc(CPUMIPSState *env, target_ulong value)
{
    env->active_tc.PC = value & ~(target_ulong)1;
    if (value & 1) {
        env->hflags |= MIPS_HFLAG_M16;
    } else {
        env->hflags &= ~(MIPS_HFLAG_M16);
    }
}

static inline bool mips_env_is_bigendian(CPUMIPSState *env)
{
    return extract32(env->CP0_Config0, CP0C0_BE, 1);
}

static inline MemOp mo_endian_env(CPUMIPSState *env)
{
    return mips_env_is_bigendian(env) ? MO_BE : MO_LE;
}

static inline void restore_pamask(CPUMIPSState *env)
{
    if (env->hflags & MIPS_HFLAG_ELPA) {
        env->PAMask = (1ULL << env->PABITS) - 1;
    } else {
        env->PAMask = PAMASK_BASE;
    }
}

static inline int mips_vpe_active(CPUMIPSState *env)
{
    int active = 1;

    /* Check that the VPE is enabled. */
    if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
        active = 0;
    }
    /* Check that the VPE is activated. */
    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
        active = 0;
    }

    /*
     * Now verify that there are active thread contexts in the VPE.
     *
     * This assumes the CPU model will internally reschedule threads
     * if the active one goes to sleep. If there are no threads available
     * the active one will be in a sleeping state, and we can turn off
     * the entire VPE.
     */
    if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
        /* TC is not activated. */
        active = 0;
    }
    if (env->active_tc.CP0_TCHalt & 1) {
        /* TC is in halt state. */
        active = 0;
    }

    return active;
}

static inline int mips_vp_active(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;

    /* Check if the VP disabled other VPs (which means the VP is enabled) */
    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        return 1;
    }

    /* Check if the virtual processor is disabled due to a DVP */
    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        if ((&other_cpu->env != env) &&
            ((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
            return 0;
        }
    }
    return 1;
}
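
/*
 * Recompute the cached translation flags in env->hflags from the
 * architectural CP0 and FPU state (Status, Config3/5, PageGrain, FCR0).
 * The flags derived here (operating mode, 64-bit mode, CP0/FPU/MSA/DSP
 * accessibility, address wrapping) are what the translator consults
 * instead of re-reading CP0 registers; it is typically called after any
 * write that may change these bits.
 */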
static inline void compute_hflags(CPUMIPSState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                     MIPS_HFLAG_DSP_R3 | MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA |
                     MIPS_HFLAG_FRE | MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        env->hflags |= MIPS_HFLAG_ERL;
    }
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) &
                       MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    if ((env->insn_flags & ISA_MIPS3) &&
        (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
         (env->CP0_Status & (1 << CP0St_PX)) ||
         (env->CP0_Status & (1 << CP0St_UX)))) {
        env->hflags |= MIPS_HFLAG_64;
    }

    if (!(env->insn_flags & ISA_MIPS3)) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
               !(env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (env->insn_flags & ISA_MIPS_R6) {
        /* Address wrapping for Supervisor and Kernel is specified in R6 */
        if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
             !(env->CP0_Status & (1 << CP0St_SX))) ||
            (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
             !(env->CP0_Status & (1 << CP0St_KX)))) {
            env->hflags |= MIPS_HFLAG_AWRAP;
        }
    }
#endif
    if (((env->CP0_Status & (1 << CP0St_CU0)) &&
         !(env->insn_flags & ISA_MIPS_R6)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
        (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
        env->hflags |= MIPS_HFLAG_SBRI;
    }
    if (env->insn_flags & ASE_DSP_R3) {
        /*
         * Our cpu supports DSP R3 ASE, so enable
         * access to DSP R3 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                           MIPS_HFLAG_DSP_R3;
        }
    } else if (env->insn_flags & ASE_DSP_R2) {
        /*
         * Our cpu supports DSP R2 ASE, so enable
         * access to DSP R2 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2;
        }
    } else if (env->insn_flags & ASE_DSP) {
        /*
         * Our cpu supports DSP ASE, so enable
         * access to DSP resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP;
        }
    }
    if (env->insn_flags & ISA_MIPS_R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS_R1) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /*
         * All supported MIPS IV CPUs use the XX (CU3) to enable
         * and disable the MIPS IV extensions to the MIPS III ISA.
         * Some other MIPS IV CPUs ignore the bit, so the check here
         * would be too restrictive for them.
         */
        if (env->CP0_Status & (1U << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
    if (ase_msa_available(env)) {
        if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
            env->hflags |= MIPS_HFLAG_MSA;
        }
    }
    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
        if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
            env->hflags |= MIPS_HFLAG_FRE;
        }
    }
    if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
        if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
            env->hflags |= MIPS_HFLAG_ELPA;
        }
    }
}

#endif