/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/*
 * Debug knob: when defined, the targeted invalidation paths below fall
 * back to flushing the entire QEMU TLB instead of individual pages.
 */
/* #define FLUSH_ALL_TLBS */

/*****************************************************************************/
/* PowerPC MMU emulation */

/* Software driven TLB helpers */

/*
 * Invalidate every entry of the 6xx software TLB and flush the QEMU TLB.
 * The tlb6 array holds env->nb_tlb entries for each of ITLB and DTLB,
 * hence the 2x bound.
 */
static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppc6xx_tlb_t *tlb;
    int nr, max = 2 * env->nb_tlb;

    for (nr = 0; nr < max; nr++) {
        tlb = &env->tlb.tlb6[nr];
        pte_invalidate(&tlb->pte0);
    }
    tlb_flush(env_cpu(env));
}

/*
 * Invalidate the 6xx software TLB entries that can map @eaddr, in every
 * way of the selected (I or D) TLB.  When @match_epn is non-zero only
 * entries whose EPN equals @eaddr exactly are dropped; otherwise every
 * valid entry in the congruence class is dropped.
 */
static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
                                               target_ulong eaddr,
                                               int is_code, int match_epn)
{
#if !defined(FLUSH_ALL_TLBS)
    CPUState *cs = env_cpu(env);
    ppc6xx_tlb_t *tlb;
    int way, nr;

    /* Invalidate ITLB + DTLB, all ways */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
        tlb = &env->tlb.tlb6[nr];
        if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
            qemu_log_mask(CPU_LOG_MMU, "TLB invalidate %d/%d "
                          TARGET_FMT_lx "\n", nr, env->nb_tlb, eaddr);
            pte_invalidate(&tlb->pte0);
            tlb_flush_page(cs, tlb->EPN);
        }
    }
#else
    /* XXX: PowerPC specification say this is valid as well */
    ppc6xx_tlb_invalidate_all(env);
#endif
}
/* Invalidate all 6xx TLB entries matching @eaddr (any EPN in the class). */
static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
                                              target_ulong eaddr, int is_code)
{
    ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
}

/*
 * Install a 6xx software TLB entry for @EPN in the given @way of the
 * I or D TLB, recording pte0/pte1 as loaded by the software miss
 * handler.  Also updates env->last_way for the hardware LRU hint.
 */
static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
                             int is_code, target_ulong pte0, target_ulong pte1)
{
    ppc6xx_tlb_t *tlb;
    int nr;

    nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
    tlb = &env->tlb.tlb6[nr];
    qemu_log_mask(CPU_LOG_MMU, "Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 "
                  TARGET_FMT_lx " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb,
                  EPN, pte0, pte1);
    /* Invalidate any pending reference in QEMU for this virtual address */
    ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
    tlb->pte0 = pte0;
    tlb->pte1 = pte1;
    tlb->EPN = EPN;
    /* Store last way for LRU mechanism */
    env->last_way = way;
}

/* Helpers specific to PowerPC 40x implementations */

/* Mark every 40x software TLB entry invalid and flush the QEMU TLB. */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppcemb_tlb_t *tlb;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        tlb->prot &= ~PAGE_VALID;
    }
    tlb_flush(env_cpu(env));
}

/*
 * Clear MAS1_VALID on every entry of each BookE 2.06 TLB selected by the
 * @flags bitmask (bit i selects TLBn).  When @check_iprot is set, entries
 * protected by MAS1_IPROT are preserved.  Always flushes the QEMU TLB.
 */
static void booke206_flush_tlb(CPUPPCState *env, int flags,
                               const int check_iprot)
{
    int tlb_size;
    int i, j;
    ppcmas_tlb_t *tlb = env->tlb.tlbm;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        if (flags & (1 << i)) {
            tlb_size = booke206_tlb_size(env, i);
            for (j = 0; j < tlb_size; j++) {
                if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
                    tlb[j].mas1 &= ~MAS1_VALID;
                }
            }
        }
        /* Advance past this TLB even if it was not selected for flushing */
        tlb += booke206_tlb_size(env, i);
    }

    tlb_flush(env_cpu(env));
}
/*****************************************************************************/
/* BATs management */
#if !defined(FLUSH_ALL_TLBS)
/*
 * Flush from the QEMU TLB every page covered by the BAT described by
 * @BATu (upper BAT register value) and @mask (block length mask derived
 * from the BL field).  Falls back to a full flush when the region is
 * large enough that per-page flushing would be slower.
 */
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
                                     target_ulong mask)
{
    CPUState *cs = env_cpu(env);
    target_ulong base, end, page;

    /* BEPI is the top 15 bits of BATu; the block is at least 128KB */
    base = BATu & ~0x0001FFFF;
    end = base + mask + 0x00020000;
    if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
        /* Flushing 1024 4K pages is slower than a complete flush */
        qemu_log_mask(CPU_LOG_MMU, "Flush all BATs\n");
        tlb_flush(cs);
        qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
        return;
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush BAT from " TARGET_FMT_lx
                  " to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
                  base, end, mask);
    for (page = base; page != end; page += TARGET_PAGE_SIZE) {
        tlb_flush_page(cs, page);
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
}
#endif

/*
 * Trace a BAT register store: @ID is 'I' or 'D', @ul selects the
 * upper (0) or lower (1) half, @nr is the BAT index.
 */
static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
                                  target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU, "Set %cBAT%d%c to " TARGET_FMT_lx " ("
                  TARGET_FMT_lx ")\n", ID, nr, ul == 0 ? 'u' : 'l',
                  value, env->nip);
}
'u' : 'l', 177 value, env->nip); 178 } 179 180 void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value) 181 { 182 target_ulong mask; 183 184 dump_store_bat(env, 'I', 0, nr, value); 185 if (env->IBAT[0][nr] != value) { 186 mask = (value << 15) & 0x0FFE0000UL; 187 #if !defined(FLUSH_ALL_TLBS) 188 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 189 #endif 190 /* 191 * When storing valid upper BAT, mask BEPI and BRPN and 192 * invalidate all TLBs covered by this BAT 193 */ 194 mask = (value << 15) & 0x0FFE0000UL; 195 env->IBAT[0][nr] = (value & 0x00001FFFUL) | 196 (value & ~0x0001FFFFUL & ~mask); 197 env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) | 198 (env->IBAT[1][nr] & ~0x0001FFFF & ~mask); 199 #if !defined(FLUSH_ALL_TLBS) 200 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 201 #else 202 tlb_flush(env_cpu(env)); 203 #endif 204 } 205 } 206 207 void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value) 208 { 209 dump_store_bat(env, 'I', 1, nr, value); 210 env->IBAT[1][nr] = value; 211 } 212 213 void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value) 214 { 215 target_ulong mask; 216 217 dump_store_bat(env, 'D', 0, nr, value); 218 if (env->DBAT[0][nr] != value) { 219 /* 220 * When storing valid upper BAT, mask BEPI and BRPN and 221 * invalidate all TLBs covered by this BAT 222 */ 223 mask = (value << 15) & 0x0FFE0000UL; 224 #if !defined(FLUSH_ALL_TLBS) 225 do_invalidate_BAT(env, env->DBAT[0][nr], mask); 226 #endif 227 mask = (value << 15) & 0x0FFE0000UL; 228 env->DBAT[0][nr] = (value & 0x00001FFFUL) | 229 (value & ~0x0001FFFFUL & ~mask); 230 env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) | 231 (env->DBAT[1][nr] & ~0x0001FFFF & ~mask); 232 #if !defined(FLUSH_ALL_TLBS) 233 do_invalidate_BAT(env, env->DBAT[0][nr], mask); 234 #else 235 tlb_flush(env_cpu(env)); 236 #endif 237 } 238 } 239 240 void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value) 241 { 242 dump_store_bat(env, 'D', 1, nr, value); 
/*****************************************************************************/
/* TLB management */

/*
 * Invalidate the whole translation cache for the current MMU model.
 * 64-bit hash/radix and 32B models clear the deferred-flush flag and
 * flush immediately; software-TLB models walk their entry arrays.
 */
void ppc_tlb_invalidate_all(CPUPPCState *env)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_SOFT_4xx:
        ppc4xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        tlb_flush(env_cpu(env));
        break;
    case POWERPC_MMU_BOOKE206:
        booke206_flush_tlb(env, -1, 0);
        break;
    case POWERPC_MMU_32B:
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
        break;
    default:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
        break;
    }
}

/*
 * Invalidate translations for one effective address (tlbie-style).
 * For 64-bit and 32B models the flush is only recorded in
 * env->tlb_need_flush and performed lazily at the next synchronizing
 * event; for 6xx the software TLB entries are dropped immediately.
 */
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
    addr &= TARGET_PAGE_MASK;
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* tlbie invalidate TLBs for all segments */
        /*
         * XXX: given the fact that there are too many segments to invalidate,
         * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
         * we just invalidate all TLBs
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_virt(env, addr, 0);
        ppc6xx_tlb_invalidate_virt(env, addr, 1);
        break;
    case POWERPC_MMU_32B:
        /*
         * Actual CPUs invalidate entire congruence classes based on
         * the geometry of their TLBs and some OSes take that into
         * account, we just mark the TLB to be flushed later (context
         * synchronizing event or sync instruction on 32-bit).
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        break;
    default:
        /* Should never reach here with other MMU models */
        assert(0);
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}
/*****************************************************************************/
/* Special registers manipulation */

/* Segment registers load and store */

/*
 * Read segment register @sr_num.  On 64-bit MMU models segment
 * registers do not exist; 0 is returned (XXX: SLB readback not done).
 */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* XXX */
        return 0;
    }
#endif
    return env->sr[sr_num];
}

/*
 * Write segment register @srnum.  On 64-bit MMU models the store is
 * emulated as an SLB entry update (mtsr compatibility mode); on 32-bit
 * models a changed SR schedules a deferred full TLB flush.
 */
void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU,
            "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
            (int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        PowerPCCPU *cpu = env_archcpu(env);
        uint64_t esid, vsid;

        /* ESID = srnum */
        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;

        /* VSID = VSID */
        vsid = (value & 0xfffffff) << 12;
        /* flags = flags */
        vsid |= ((value >> 27) & 0xf) << 8;

        ppc_store_slb(cpu, srnum, esid, vsid);
    } else
#endif
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
        /*
         * Invalidating 256MB of virtual memory in 4kB pages is way
         * longer than flushing the whole TLB.
         */
/* The per-page path below is intentionally disabled ("&& 0"). */
#if !defined(FLUSH_ALL_TLBS) && 0
        {
            target_ulong page, end;
            /* Invalidate 256 MB of virtual memory */
            page = (16 << 20) * srnum;
            end = page + (16 << 20);
            for (; page != end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env_cpu(env), page);
            }
        }
#else
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
#endif
    }
}
/* TLB management */

/* tlbia: invalidate all TLB entries. */
void helper_tlbia(CPUPPCState *env)
{
    ppc_tlb_invalidate_all(env);
}

/* tlbie (pre-ISA 3.0): invalidate translations for one effective address. */
void helper_tlbie(CPUPPCState *env, target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

#if defined(TARGET_PPC64)

/* Invalidation Selector */
#define TLBIE_IS_VA 0
#define TLBIE_IS_PID 1
#define TLBIE_IS_LPID 2
#define TLBIE_IS_ALL 3

/* Radix Invalidation Control */
#define TLBIE_RIC_TLB 0
#define TLBIE_RIC_PWC 1
#define TLBIE_RIC_ALL 2
#define TLBIE_RIC_GRP 3

/* Radix Actual Page sizes */
#define TLBIE_R_AP_4K 0
#define TLBIE_R_AP_64K 5
#define TLBIE_R_AP_2M 1
#define TLBIE_R_AP_1G 2

/* RB field masks */
#define TLBIE_RB_EPN_MASK PPC_BITMASK(0, 51)
#define TLBIE_RB_IS_MASK PPC_BITMASK(52, 53)
#define TLBIE_RB_AP_MASK PPC_BITMASK(56, 58)

/*
 * ISA 3.0 tlbie/tlbiel emulation.
 *
 * @rb encodes EPN, IS (invalidation selector) and AP (actual page size);
 * @flags carries RIC, PRS, R and the local (tlbiel) bit.  Partial
 * invalidation is only implemented for Radix; everything else falls back
 * to a full (deferred) flush.  Invalid instruction forms raise a program
 * interrupt.
 */
void helper_tlbie_isa300(CPUPPCState *env, target_ulong rb, target_ulong rs,
                         uint32_t flags)
{
    unsigned ric = (flags & TLBIE_F_RIC_MASK) >> TLBIE_F_RIC_SHIFT;
    /*
     * With the exception of the checks for invalid instruction forms,
     * PRS is currently ignored, because we don't know if a given TLB entry
     * is process or partition scoped.
     */
    bool prs = flags & TLBIE_F_PRS;
    bool r = flags & TLBIE_F_R;
    bool local = flags & TLBIE_F_LOCAL;
    bool effR;
    unsigned is = extract64(rb, PPC_BIT_NR(53), 2);
    unsigned ap;        /* actual page size */
    target_ulong addr, pgoffs_mask;

    qemu_log_mask(CPU_LOG_MMU,
        "%s: local=%d addr=" TARGET_FMT_lx " ric=%u prs=%d r=%d is=%u\n",
        __func__, local, rb & TARGET_PAGE_MASK, ric, prs, r, is);

    /* Effective R: instruction R bit in HV mode, else LPCR[HR] */
    effR = FIELD_EX64(env->msr, MSR, HV) ? r : env->spr[SPR_LPCR] & LPCR_HR;

    /* Partial TLB invalidation is supported for Radix only for now. */
    if (!effR) {
        goto inval_all;
    }

    /* Check for invalid instruction forms (effR=1). */
    if (unlikely(ric == TLBIE_RIC_GRP ||
                 ((ric == TLBIE_RIC_PWC || ric == TLBIE_RIC_ALL) &&
                  is == TLBIE_IS_VA) ||
                 (!prs && is == TLBIE_IS_PID))) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: invalid instruction form: ric=%u prs=%d r=%d is=%u\n",
            __func__, ric, prs, r, is);
        goto invalid;
    }

    /* We don't cache Page Walks. */
    if (ric == TLBIE_RIC_PWC) {
        if (local) {
            unsigned set = extract64(rb, PPC_BIT_NR(51), 12);
            if (set != 0) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid set: %d\n",
                              __func__, set);
                goto invalid;
            }
        }
        return;
    }

    /*
     * Invalidation by LPID or PID is not supported, so fallback
     * to full TLB flush in these cases.
     */
    if (is != TLBIE_IS_VA) {
        goto inval_all;
    }

    /*
     * The results of an attempt to invalidate a translation outside of
     * quadrant 0 for Radix Tree translation (effR=1, RIC=0, PRS=1, IS=0,
     * and EA 0:1 != 0b00) are boundedly undefined.
     */
    if (unlikely(ric == TLBIE_RIC_TLB && prs && is == TLBIE_IS_VA &&
                 (rb & R_EADDR_QUADRANT) != R_EADDR_QUADRANT0)) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: attempt to invalidate a translation outside of quadrant 0\n",
            __func__);
        goto inval_all;
    }

    assert(is == TLBIE_IS_VA);
    assert(ric == TLBIE_RIC_TLB || ric == TLBIE_RIC_ALL);

    /* Map the AP field to the page-offset mask of the targeted page size */
    ap = extract64(rb, PPC_BIT_NR(58), 3);
    switch (ap) {
    case TLBIE_R_AP_4K:
        pgoffs_mask = 0xfffull;
        break;

    case TLBIE_R_AP_64K:
        pgoffs_mask = 0xffffull;
        break;

    case TLBIE_R_AP_2M:
        pgoffs_mask = 0x1fffffull;
        break;

    case TLBIE_R_AP_1G:
        pgoffs_mask = 0x3fffffffull;
        break;

    default:
        /*
         * If the value specified in RS 0:31, RS 32:63, RB 54:55, RB 56:58,
         * RB 44:51, or RB 56:63, when it is needed to perform the specified
         * operation, is not supported by the implementation, the instruction
         * is treated as if the instruction form were invalid.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid AP: %d\n", __func__, ap);
        goto invalid;
    }

    addr = rb & TLBIE_RB_EPN_MASK & ~pgoffs_mask;

    if (local) {
        tlb_flush_page(env_cpu(env), addr);
    } else {
        tlb_flush_page_all_cpus_synced(env_cpu(env), addr);
    }
    return;

inval_all:
    /* Record a deferred local (and, for tlbie, global) flush */
    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    if (!local) {
        env->tlb_need_flush |= TLB_NEED_GLOBAL_FLUSH;
    }
    return;

invalid:
    raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                           POWERPC_EXCP_INVAL |
                           POWERPC_EXCP_INVAL_INVAL, GETPC());
}

#endif
/* tlbiva: BookE-only; not implemented. */
void helper_tlbiva(CPUPPCState *env, target_ulong addr)
{
    /* tlbiva instruction only exists on BookE */
    assert(env->mmu_model == POWERPC_MMU_BOOKE);
    /* XXX: TODO */
    cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
}

/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */

/*
 * Common path of tlbld/tlbli: load a 6xx software TLB entry from the
 * miss SPRs (RPA + ICMP/IMISS or DCMP/DMISS).  The way to replace is
 * taken from SRR1 bit 17, as set by the hardware miss handler.
 */
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    qemu_log_mask(CPU_LOG_MMU, "%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx
                  " PTE0 " TARGET_FMT_lx " PTE1 " TARGET_FMT_lx " way %d\n",
                  __func__, new_EPN, EPN, CMP, RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

/* tlbld: load a data TLB entry. */
void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}

/* tlbli: load an instruction TLB entry. */
void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}

/* Convert a BookE TSIZE field to a page size in bytes (1KB * 4^size). */
static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}
/*
 * Convert a page size in bytes back to a BookE TSIZE field value,
 * or -1 if the size is not one of the architected 4^n KB sizes.
 */
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined(TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00

/* Update the 40x PID SPR; a change means a new address space. */
void helper_store_40x_pid(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_40x_PID] != val) {
        env->spr[SPR_40x_PID] = val;
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    }
}

/*
 * tlbre (high word): read back EPN/valid/size of TLB @entry.
 * Architecturally tlbre also loads the entry's TID into PID,
 * hence the helper_store_40x_pid() call.
 */
target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    helper_store_40x_pid(env, tlb->PID);
    return ret;
}
/* tlbre (low word): read back RPN and EX/WR permission bits. */
target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}

/*
 * Flush the QEMU TLB range covered by an embedded-MMU entry, limited to
 * the MMU indexes the entry can be visible in.  prot bits 0-3 are user
 * permissions, bits 4-7 supervisor; attr bit 0 selects the TS=1 index
 * group (hence the << 2).
 */
static void ppcemb_tlb_flush(CPUState *cs, ppcemb_tlb_t *tlb)
{
    unsigned mmu_idx = 0;

    if (tlb->prot & 0xf) {
        mmu_idx |= 0x1;
    }
    if ((tlb->prot >> 4) & 0xf) {
        mmu_idx |= 0x2;
    }
    if (tlb->attr & 1) {
        mmu_idx <<= 2;
    }

    tlb_flush_range_by_mmuidx(cs, tlb->EPN, tlb->size, mmu_idx,
                              TARGET_LONG_BITS);
}

/*
 * tlbwe (high word): write EPN, size and valid bit of TLB @entry.
 * The previous mapping is flushed first if it was valid for the
 * current PID.  The entry's PID is snapshotted from SPR 40x_PID.
 */
void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %d val " TARGET_FMT_lx "\n",
                  __func__, (int)entry,
                  val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(cs, tlb);
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /*
     * We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n"
                  "Please implement TARGET_PAGE_BITS_VARY\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(cs,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
                  " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx
                  " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
/*
 * tlbwe (low word): write RPN, storage attributes and permissions of
 * TLB @entry.  As for the high word, the previous mapping is flushed
 * first if it was valid for the current PID.  Read permission is
 * always granted; EX and WR come from the value written.
 */
void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %i val " TARGET_FMT_lx "\n",
                  __func__, (int)entry, val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(cs, tlb);
    }
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
                  " EPN " TARGET_FMT_lx
                  " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
/* tlbsx (40x): search the TLB for @address under the current PID. */
target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

/*
 * Return true if @tlb's PID matches the current BookE PID, or PID1/PID2
 * when the CPU implements extra PID registers (env->nb_pids != 0).
 * A zero PID1/PID2 is treated as "not in use".
 */
static bool mmubooke_pid_match(CPUPPCState *env, ppcemb_tlb_t *tlb)
{
    if (tlb->PID == env->spr[SPR_BOOKE_PID]) {
        return true;
    }
    if (!env->nb_pids) {
        return false;
    }

    if (env->spr[SPR_BOOKE_PID1] && tlb->PID == env->spr[SPR_BOOKE_PID1]) {
        return true;
    }
    if (env->spr[SPR_BOOKE_PID2] && tlb->PID == env->spr[SPR_BOOKE_PID2]) {
        return true;
    }

    return false;
}

/* PowerPC 440 TLB management */

/*
 * 440 tlbwe: write one of the three words of TLB @entry.
 * word 0: EPN, size, attr bit 0, valid bit, PID (from MMUCR);
 * word 1: RPN; word 2: storage attributes and user/supervisor
 * permission bits (user permissions live in prot bits 4-7).
 */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s word %d entry %d value " TARGET_FMT_lx "\n",
                  __func__, word, (int)entry, value);
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];

    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && mmubooke_pid_match(env, tlb)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(env_cpu(env), tlb);
    }

    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        tlb->EPN = value & 0xFFFFFC00;
        tlb->size = booke_tlb_to_page_size((value >> 4) & 0xF);
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            tlb->prot &= ~PAGE_VALID;
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        break;
    case 1:
        tlb->RPN = value & 0xFFFFFC0F;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}
/*
 * 440 tlbre: read back one of the three words of TLB @entry.
 * Reading word 0 also deposits the entry's PID into MMUCR[TID],
 * matching the architected side effect.
 */
target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}

/* tlbsx (440): search the TLB for @address under MMUCR[TID]. */
target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

/* PowerPC BookE 2.06 TLB management */

/*
 * Return the TLB entry currently selected by MAS0 (TLBSEL/ESEL) and
 * MAS2 (EPN), or NULL if the selection is out of range.  Hardware
 * entry selection (MAS0[HES]) is not supported.
 */
static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env_cpu(env), "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}
/* Write a BookE PID register; a PID change means a new address space. */
void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs mean we're in a different address space now */
    tlb_flush(env_cpu(env));
}

/* Write EPLC and flush only the external-PID load MMU index. */
void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
}

/* Write EPSC and flush only the external-PID store MMU index. */
void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
}

/*
 * Flush a BookE 2.06 entry from the QEMU TLB.  Only a TARGET_PAGE_SIZE
 * entry can be flushed page-wise; larger pages require a full flush.
 */
static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env_cpu(env));
    }
}

/*
 * BookE 2.06 tlbwe: write the TLB entry selected by MAS0/MAS2 from the
 * MAS registers.  Validates the write qualifier, LRAT selection, the
 * targeted page size, and IPROT support; raises a program interrupt on
 * invalid forms.  Flushes the QEMU TLB for both the displaced entry
 * (if valid) and the new one.
 */
void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;


    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !FIELD_EX64(env->msr, MSR, GS)) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    if (!tlb) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    if (FIELD_EX64(env->msr, MSR, GS)) {
        cpu_abort(env_cpu(env), "missing HV implementation\n");
    }

    if (tlb->mas1 & MAS1_VALID) {
        /*
         * Invalidate the page in QEMU TLB if it was a valid entry.
         *
         * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
         * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
         * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
         *
         * "Note that when an L2 TLB entry is written, it may be displacing an
         * already valid entry in the same L2 TLB location (a victim). If a
         * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
         * TLB entry is automatically invalidated."
         */
        flush_page(env, tlb);
    }

    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
        env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
        /* For TLB which has a fixed size TSIZE is ignored with MAV2 */
        booke206_fixed_size_tlbn(env, tlbn, tlb);
    } else {
        if (!(tlbncfg & TLBnCFG_AVAIL)) {
            /* force !AVAIL TLB entries to correct page size */
            tlb->mas1 &= ~MAS1_TSIZE_MASK;
            /* XXX can be configured in MMUCSR0 */
            tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
        }
    }

    /* Make a mask from TLB size to discard invalid bits in EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    if (!FIELD_EX64(env->msr, MSR, CM)) {
        /*
         * Executing a tlbwe instruction in 32-bit mode will set bits
         * 0:31 of the TLB EPN field to zero.
         */
        mask &= 0xffffffff;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    flush_page(env, tlb);
}
1119 */ 1120 mask &= 0xffffffff; 1121 } 1122 1123 tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask; 1124 1125 if (!(tlbncfg & TLBnCFG_IPROT)) { 1126 /* no IPROT supported by TLB */ 1127 tlb->mas1 &= ~MAS1_IPROT; 1128 } 1129 1130 flush_page(env, tlb); 1131 } 1132 1133 static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb) 1134 { 1135 int tlbn = booke206_tlbm_to_tlbn(env, tlb); 1136 int way = booke206_tlbm_to_way(env, tlb); 1137 1138 env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT; 1139 env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT; 1140 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 1141 1142 env->spr[SPR_BOOKE_MAS1] = tlb->mas1; 1143 env->spr[SPR_BOOKE_MAS2] = tlb->mas2; 1144 env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3; 1145 env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32; 1146 } 1147 1148 void helper_booke206_tlbre(CPUPPCState *env) 1149 { 1150 ppcmas_tlb_t *tlb = NULL; 1151 1152 tlb = booke206_cur_tlb(env); 1153 if (!tlb) { 1154 env->spr[SPR_BOOKE_MAS1] = 0; 1155 } else { 1156 booke206_tlb_to_mas(env, tlb); 1157 } 1158 } 1159 1160 void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address) 1161 { 1162 ppcmas_tlb_t *tlb = NULL; 1163 int i, j; 1164 hwaddr raddr; 1165 uint32_t spid, sas; 1166 1167 spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT; 1168 sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS; 1169 1170 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1171 int ways = booke206_tlb_ways(env, i); 1172 1173 for (j = 0; j < ways; j++) { 1174 tlb = booke206_get_tlbm(env, i, address, j); 1175 1176 if (!tlb) { 1177 continue; 1178 } 1179 1180 if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) { 1181 continue; 1182 } 1183 1184 if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { 1185 continue; 1186 } 1187 1188 booke206_tlb_to_mas(env, tlb); 1189 return; 1190 } 1191 } 1192 1193 /* no entry found, fill with defaults */ 1194 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; 1195 
env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; 1196 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; 1197 env->spr[SPR_BOOKE_MAS3] = 0; 1198 env->spr[SPR_BOOKE_MAS7] = 0; 1199 1200 if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) { 1201 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; 1202 } 1203 1204 env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16) 1205 << MAS1_TID_SHIFT; 1206 1207 /* next victim logic */ 1208 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; 1209 env->last_way++; 1210 env->last_way &= booke206_tlb_ways(env, 0) - 1; 1211 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; 1212 } 1213 1214 static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn, 1215 vaddr ea) 1216 { 1217 int i; 1218 int ways = booke206_tlb_ways(env, tlbn); 1219 target_ulong mask; 1220 1221 for (i = 0; i < ways; i++) { 1222 ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i); 1223 if (!tlb) { 1224 continue; 1225 } 1226 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); 1227 if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) && 1228 !(tlb->mas1 & MAS1_IPROT)) { 1229 tlb->mas1 &= ~MAS1_VALID; 1230 } 1231 } 1232 } 1233 1234 void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address) 1235 { 1236 CPUState *cs; 1237 1238 if (address & 0x4) { 1239 /* flush all entries */ 1240 if (address & 0x8) { 1241 /* flush all of TLB1 */ 1242 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1); 1243 } else { 1244 /* flush all of TLB0 */ 1245 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0); 1246 } 1247 return; 1248 } 1249 1250 if (address & 0x8) { 1251 /* flush TLB1 entries */ 1252 booke206_invalidate_ea_tlb(env, 1, address); 1253 CPU_FOREACH(cs) { 1254 tlb_flush(cs); 1255 } 1256 } else { 1257 /* flush TLB0 entries */ 1258 booke206_invalidate_ea_tlb(env, 0, address); 1259 CPU_FOREACH(cs) { 1260 tlb_flush_page(cs, address & MAS2_EPN_MASK); 1261 } 1262 } 1263 } 1264 1265 void helper_booke206_tlbilx0(CPUPPCState *env, 
target_ulong address) 1266 { 1267 /* XXX missing LPID handling */ 1268 booke206_flush_tlb(env, -1, 1); 1269 } 1270 1271 void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address) 1272 { 1273 int i, j; 1274 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); 1275 ppcmas_tlb_t *tlb = env->tlb.tlbm; 1276 int tlb_size; 1277 1278 /* XXX missing LPID handling */ 1279 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1280 tlb_size = booke206_tlb_size(env, i); 1281 for (j = 0; j < tlb_size; j++) { 1282 if (!(tlb[j].mas1 & MAS1_IPROT) && 1283 ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) { 1284 tlb[j].mas1 &= ~MAS1_VALID; 1285 } 1286 } 1287 tlb += booke206_tlb_size(env, i); 1288 } 1289 tlb_flush(env_cpu(env)); 1290 } 1291 1292 void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address) 1293 { 1294 int i, j; 1295 ppcmas_tlb_t *tlb; 1296 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); 1297 int pid = tid >> MAS6_SPID_SHIFT; 1298 int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS; 1299 int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? 
MAS1_IND : 0; 1300 /* XXX check for unsupported isize and raise an invalid opcode then */ 1301 int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK; 1302 /* XXX implement MAV2 handling */ 1303 bool mav2 = false; 1304 1305 /* XXX missing LPID handling */ 1306 /* flush by pid and ea */ 1307 for (i = 0; i < BOOKE206_MAX_TLBN; i++) { 1308 int ways = booke206_tlb_ways(env, i); 1309 1310 for (j = 0; j < ways; j++) { 1311 tlb = booke206_get_tlbm(env, i, address, j); 1312 if (!tlb) { 1313 continue; 1314 } 1315 if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) || 1316 (tlb->mas1 & MAS1_IPROT) || 1317 ((tlb->mas1 & MAS1_IND) != ind) || 1318 ((tlb->mas8 & MAS8_TGS) != sgs)) { 1319 continue; 1320 } 1321 if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) { 1322 /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */ 1323 continue; 1324 } 1325 /* XXX e500mc doesn't match SAS, but other cores might */ 1326 tlb->mas1 &= ~MAS1_VALID; 1327 } 1328 } 1329 tlb_flush(env_cpu(env)); 1330 } 1331 1332 void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type) 1333 { 1334 int flags = 0; 1335 1336 if (type & 2) { 1337 flags |= BOOKE206_FLUSH_TLB1; 1338 } 1339 1340 if (type & 4) { 1341 flags |= BOOKE206_FLUSH_TLB0; 1342 } 1343 1344 booke206_flush_tlb(env, flags, 1); 1345 } 1346 1347 1348 void helper_check_tlb_flush_local(CPUPPCState *env) 1349 { 1350 check_tlb_flush(env, false); 1351 } 1352 1353 void helper_check_tlb_flush_global(CPUPPCState *env) 1354 { 1355 check_tlb_flush(env, true); 1356 } 1357 1358 1359 bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size, 1360 MMUAccessType access_type, int mmu_idx, 1361 bool probe, uintptr_t retaddr) 1362 { 1363 PowerPCCPU *cpu = POWERPC_CPU(cs); 1364 hwaddr raddr; 1365 int page_size, prot; 1366 1367 if (ppc_xlate(cpu, eaddr, access_type, &raddr, 1368 &page_size, &prot, mmu_idx, !probe)) { 1369 tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, 1370 prot, mmu_idx, 1UL << page_size); 1371 return true; 1372 } 1373 
if (probe) { 1374 return false; 1375 } 1376 raise_exception_err_ra(&cpu->env, cs->exception_index, 1377 cpu->env.error_code, retaddr); 1378 } 1379