1 /*
2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "cpu.h"
23 #include "sysemu/kvm.h"
24 #include "kvm_ppc.h"
25 #include "mmu-hash64.h"
26 #include "mmu-hash32.h"
27 #include "exec/exec-all.h"
28 #include "exec/page-protection.h"
29 #include "exec/log.h"
30 #include "helper_regs.h"
31 #include "qemu/error-report.h"
32 #include "qemu/qemu-print.h"
33 #include "internal.h"
34 #include "mmu-book3s-v3.h"
35 #include "mmu-radix64.h"
36 #include "exec/helper-proto.h"
37 #include "exec/cpu_ldst.h"
38
39 /* #define FLUSH_ALL_TLBS */
40
41 /*****************************************************************************/
42 /* PowerPC MMU emulation */
43
44 /* Software driven TLB helpers */
/* Invalidate every entry of the 6xx software TLB and flush the QEMU TLB. */
static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
{
    int i, count = env->nb_tlb;

    /* LOG_SWTLB("Invalidate all TLBs\n"); */
    /* Separate I and D TLBs double the number of entries to clear */
    if (env->id_tlbs == 1) {
        count *= 2;
    }
    for (i = 0; i < count; i++) {
        pte_invalidate(&env->tlb.tlb6[i].pte0);
    }
    tlb_flush(env_cpu(env));
}
62
/*
 * Invalidate 6xx software TLB entries that may translate @eaddr, in all
 * ways of the selected (I or D) TLB.  With @match_epn set, only entries
 * whose EPN equals @eaddr exactly are dropped; otherwise every valid
 * entry in the congruence class is invalidated.
 */
static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
                                               target_ulong eaddr,
                                               int is_code, int match_epn)
{
#if !defined(FLUSH_ALL_TLBS)
    CPUState *cs = env_cpu(env);
    ppc6xx_tlb_t *tlb;
    int way, nr;

    /* Invalidate ITLB + DTLB, all ways */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
        tlb = &env->tlb.tlb6[nr];
        if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
            qemu_log_mask(CPU_LOG_MMU, "TLB invalidate %d/%d "
                          TARGET_FMT_lx "\n", nr, env->nb_tlb, eaddr);
            pte_invalidate(&tlb->pte0);
            /* Also drop the stale translation from the QEMU TLB */
            tlb_flush_page(cs, tlb->EPN);
        }
    }
#else
    /* XXX: PowerPC specification say this is valid as well */
    ppc6xx_tlb_invalidate_all(env);
#endif
}
88
/* Invalidate all 6xx soft-TLB entries covering @eaddr (any EPN match). */
static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
                                              target_ulong eaddr, int is_code)
{
    ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
}
94
/* Install a PTE pair into the selected way of the 6xx software TLB. */
static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
                             int is_code, target_ulong pte0, target_ulong pte1)
{
    int idx = ppc6xx_tlb_getnum(env, EPN, way, is_code);
    ppc6xx_tlb_t *entry = &env->tlb.tlb6[idx];

    qemu_log_mask(CPU_LOG_MMU, "Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 "
                  TARGET_FMT_lx " PTE1 " TARGET_FMT_lx "\n", idx, env->nb_tlb,
                  EPN, pte0, pte1);
    /* Invalidate any pending reference in QEMU for this virtual address */
    ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
    entry->pte0 = pte0;
    entry->pte1 = pte1;
    entry->EPN = EPN;
    /* Remember the way just written so the LRU replacement can rotate */
    env->last_way = way;
}
114
115 /* Helpers specific to PowerPC 40x implementations */
/* Clear the valid bit of every 4xx TLB entry and flush the QEMU TLB. */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
{
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        env->tlb.tlbe[i].prot &= ~PAGE_VALID;
    }
    tlb_flush(env_cpu(env));
}
127
/*
 * Clear the valid bit of the entries in each BookE 2.06 TLB selected by
 * @flags (one bit per TLB array).  When @check_iprot is set, entries
 * protected by MAS1[IPROT] survive the flush.
 */
static void booke206_flush_tlb(CPUPPCState *env, int flags,
                               const int check_iprot)
{
    ppcmas_tlb_t *ent = env->tlb.tlbm;
    int tlbn;

    for (tlbn = 0; tlbn < BOOKE206_MAX_TLBN; tlbn++) {
        int n = booke206_tlb_size(env, tlbn);

        if (flags & (1 << tlbn)) {
            int j;

            for (j = 0; j < n; j++) {
                if (!check_iprot || !(ent[j].mas1 & MAS1_IPROT)) {
                    ent[j].mas1 &= ~MAS1_VALID;
                }
            }
        }
        /* TLB arrays are laid out back to back in env->tlb.tlbm */
        ent += n;
    }

    tlb_flush(env_cpu(env));
}
149
150 /*****************************************************************************/
151 /* BATs management */
152 #if !defined(FLUSH_ALL_TLBS)
/*
 * Flush the QEMU TLB pages covered by the BAT described by @BATu/@mask.
 * Falls back to a complete flush when the area exceeds 1024 pages.
 */
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
                                     target_ulong mask)
{
    CPUState *cs = env_cpu(env);
    target_ulong start, limit, va;

    start = BATu & ~0x0001FFFF;
    limit = start + mask + 0x00020000;
    if (((limit - start) >> TARGET_PAGE_BITS) > 1024) {
        /* Flushing 1024 4K pages is slower than a complete flush */
        qemu_log_mask(CPU_LOG_MMU, "Flush all BATs\n");
        tlb_flush(cs);
        qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
        return;
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush BAT from " TARGET_FMT_lx
                  " to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
                  start, limit, mask);
    for (va = start; va != limit; va += TARGET_PAGE_SIZE) {
        tlb_flush_page(cs, va);
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
}
176 #endif
177
/* Trace helper: log a store to an upper (ul==0) or lower BAT register. */
static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
                                  target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU, "Set %cBAT%d%c to " TARGET_FMT_lx " ("
                  TARGET_FMT_lx ")\n", ID, nr, ul ? 'l' : 'u',
                  value, env->nip);
}
185
/*
 * Store to an upper IBAT register.
 *
 * When the value actually changes, BEPI and BRPN are masked according to
 * the block-length (BL) field of the new value, and the QEMU TLB pages
 * covered by both the old and the new mapping are invalidated.
 *
 * Fix: the identical `mask` expression was previously computed twice;
 * the redundant recomputation has been removed (no behavior change).
 */
void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
        /* Coverage mask derived from the BL field of the new value */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        /* Invalidate pages covered by the previous mapping... */
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
            (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        /* ...and pages covered by the new mapping. */
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
        tlb_flush(env_cpu(env));
#endif
    }
}
212
/* Store to a lower IBAT register; no TLB maintenance is done here. */
void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'I', 1, nr, value);
    env->IBAT[1][nr] = value;
}
218
/*
 * Store to an upper DBAT register.
 *
 * Mirrors helper_store_ibatu(): when the value changes, mask BEPI and
 * BRPN per the BL field and invalidate the QEMU TLB pages covered by
 * the old and the new mapping.
 *
 * Fix: the identical `mask` expression was previously computed twice;
 * the redundant recomputation has been removed (no behavior change).
 */
void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'D', 0, nr, value);
    if (env->DBAT[0][nr] != value) {
        /* Coverage mask derived from the BL field of the new value */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        /* Invalidate pages covered by the previous mapping... */
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#endif
        env->DBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
            (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        /* ...and pages covered by the new mapping. */
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#else
        tlb_flush(env_cpu(env));
#endif
    }
}
245
/* Store to a lower DBAT register; no TLB maintenance is done here. */
void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'D', 1, nr, value);
    env->DBAT[1][nr] = value;
}
251
252 /*****************************************************************************/
253 /* TLB management */
/*
 * Invalidate all TLB entries, dispatching on the current MMU model.
 * 64-bit and 32-bit hash MMUs also clear any pending deferred flush.
 */
void ppc_tlb_invalidate_all(CPUPPCState *env)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_SOFT_4xx:
        ppc4xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        tlb_flush(env_cpu(env));
        break;
    case POWERPC_MMU_BOOKE206:
        /* -1 selects every TLB array; IPROT is not honoured here */
        booke206_flush_tlb(env, -1, 0);
        break;
    case POWERPC_MMU_32B:
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
        break;
    default:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
        break;
    }
}
292
/*
 * Invalidate TLB entries translating @addr.  Depending on the MMU model
 * this is done immediately (6xx software TLBs) or deferred by setting
 * env->tlb_need_flush, to be performed at the next synchronizing event.
 */
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
    addr &= TARGET_PAGE_MASK;
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* tlbie invalidate TLBs for all segments */
        /*
         * XXX: given the fact that there are too many segments to invalidate,
         * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
         * we just invalidate all TLBs
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_virt(env, addr, 0);
        /* With split I/D TLBs the ITLB must be invalidated as well */
        if (env->id_tlbs == 1) {
            ppc6xx_tlb_invalidate_virt(env, addr, 1);
        }
        break;
    case POWERPC_MMU_32B:
        /*
         * Actual CPUs invalidate entire congruence classes based on
         * the geometry of their TLBs and some OSes take that into
         * account, we just mark the TLB to be flushed later (context
         * synchronizing event or sync instruction on 32-bit).
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        break;
    default:
        /* Should never reach here with other MMU models */
        assert(0);
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}
332
333 /*****************************************************************************/
334 /* Special registers manipulation */
335
336 /* Segment registers load and store */
/* Read a segment register; 64-bit MMUs have no SRs so 0 is returned. */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* XXX */
        return 0;
    }
#endif
    return env->sr[sr_num];
}
347
/*
 * Store a segment register.  On 64-bit MMUs the SR store is mapped onto
 * an SLB entry update; on 32-bit MMUs a changed SR flags a deferred
 * full TLB flush instead of invalidating the 256MB segment page by page.
 */
void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU,
            "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
            (int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        PowerPCCPU *cpu = env_archcpu(env);
        uint64_t esid, vsid;

        /* ESID = srnum */
        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;

        /* VSID = VSID */
        vsid = (value & 0xfffffff) << 12;
        /* flags = flags */
        vsid |= ((value >> 27) & 0xf) << 8;

        ppc_store_slb(cpu, srnum, esid, vsid);
    } else
#endif
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
        /*
         * Invalidating 256MB of virtual memory in 4kB pages is way
         * longer than flushing the whole TLB.
         */
#if !defined(FLUSH_ALL_TLBS) && 0
        {
            target_ulong page, end;
            /* Invalidate 256 MB of virtual memory */
            page = (16 << 20) * srnum;
            end = page + (16 << 20);
            for (; page != end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env_cpu(env), page);
            }
        }
#else
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
#endif
    }
}
390
391 /* TLB management */
/* tlbia: invalidate all TLB entries. */
void helper_tlbia(CPUPPCState *env)
{
    ppc_tlb_invalidate_all(env);
}
396
/* tlbie (pre-ISA v3.00): invalidate the TLB entries translating @addr. */
void helper_tlbie(CPUPPCState *env, target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
401
#if defined(TARGET_PPC64)

/* Invalidation Selector */
#define TLBIE_IS_VA         0
#define TLBIE_IS_PID        1
#define TLBIE_IS_LPID       2
#define TLBIE_IS_ALL        3

/* Radix Invalidation Control */
#define TLBIE_RIC_TLB       0
#define TLBIE_RIC_PWC       1
#define TLBIE_RIC_ALL       2
#define TLBIE_RIC_GRP       3

/* Radix Actual Page sizes */
#define TLBIE_R_AP_4K       0
#define TLBIE_R_AP_64K      5
#define TLBIE_R_AP_2M       1
#define TLBIE_R_AP_1G       2

/* RB field masks */
#define TLBIE_RB_EPN_MASK   PPC_BITMASK(0, 51)
#define TLBIE_RB_IS_MASK    PPC_BITMASK(52, 53)
#define TLBIE_RB_AP_MASK    PPC_BITMASK(56, 58)

/*
 * tlbie[l] (ISA v3.00 and later): invalidate translations selected by
 * the RB/RS operands and the RIC/PRS/R instruction fields (@flags).
 * Only Radix (effective R = 1) invalidation by virtual address is done
 * precisely; the other supported forms degrade to a local (and, for
 * tlbie, global) full TLB flush.  Invalid instruction forms raise a
 * program interrupt.
 */
void helper_tlbie_isa300(CPUPPCState *env, target_ulong rb, target_ulong rs,
                         uint32_t flags)
{
    unsigned ric = (flags & TLBIE_F_RIC_MASK) >> TLBIE_F_RIC_SHIFT;
    /*
     * With the exception of the checks for invalid instruction forms,
     * PRS is currently ignored, because we don't know if a given TLB entry
     * is process or partition scoped.
     */
    bool prs = flags & TLBIE_F_PRS;
    bool r = flags & TLBIE_F_R;
    bool local = flags & TLBIE_F_LOCAL;
    bool effR;
    unsigned is = extract64(rb, PPC_BIT_NR(53), 2);
    unsigned ap; /* actual page size */
    target_ulong addr, pgoffs_mask;

    qemu_log_mask(CPU_LOG_MMU,
        "%s: local=%d addr=" TARGET_FMT_lx " ric=%u prs=%d r=%d is=%u\n",
        __func__, local, rb & TARGET_PAGE_MASK, ric, prs, r, is);

    /* In non-HV mode the effective R comes from LPCR[HR], not the field */
    effR = FIELD_EX64(env->msr, MSR, HV) ? r : env->spr[SPR_LPCR] & LPCR_HR;

    /* Partial TLB invalidation is supported for Radix only for now. */
    if (!effR) {
        goto inval_all;
    }

    /* Check for invalid instruction forms (effR=1). */
    if (unlikely(ric == TLBIE_RIC_GRP ||
                 ((ric == TLBIE_RIC_PWC || ric == TLBIE_RIC_ALL) &&
                           is == TLBIE_IS_VA) ||
                 (!prs && is == TLBIE_IS_PID))) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: invalid instruction form: ric=%u prs=%d r=%d is=%u\n",
            __func__, ric, prs, r, is);
        goto invalid;
    }

    /* We don't cache Page Walks. */
    if (ric == TLBIE_RIC_PWC) {
        if (local) {
            unsigned set = extract64(rb, PPC_BIT_NR(51), 12);
            if (set != 0) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid set: %d\n",
                              __func__, set);
                goto invalid;
            }
        }
        return;
    }

    /*
     * Invalidation by LPID or PID is not supported, so fallback
     * to full TLB flush in these cases.
     */
    if (is != TLBIE_IS_VA) {
        goto inval_all;
    }

    /*
     * The results of an attempt to invalidate a translation outside of
     * quadrant 0 for Radix Tree translation (effR=1, RIC=0, PRS=1, IS=0,
     * and EA 0:1 != 0b00) are boundedly undefined.
     */
    if (unlikely(ric == TLBIE_RIC_TLB && prs && is == TLBIE_IS_VA &&
                 (rb & R_EADDR_QUADRANT) != R_EADDR_QUADRANT0)) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: attempt to invalidate a translation outside of quadrant 0\n",
            __func__);
        goto inval_all;
    }

    assert(is == TLBIE_IS_VA);
    assert(ric == TLBIE_RIC_TLB || ric == TLBIE_RIC_ALL);

    /* Decode the actual page size to know how much to round the EPN */
    ap = extract64(rb, PPC_BIT_NR(58), 3);
    switch (ap) {
    case TLBIE_R_AP_4K:
        pgoffs_mask = 0xfffull;
        break;

    case TLBIE_R_AP_64K:
        pgoffs_mask = 0xffffull;
        break;

    case TLBIE_R_AP_2M:
        pgoffs_mask = 0x1fffffull;
        break;

    case TLBIE_R_AP_1G:
        pgoffs_mask = 0x3fffffffull;
        break;

    default:
        /*
         * If the value specified in RS 0:31, RS 32:63, RB 54:55, RB 56:58,
         * RB 44:51, or RB 56:63, when it is needed to perform the specified
         * operation, is not supported by the implementation, the instruction
         * is treated as if the instruction form were invalid.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid AP: %d\n", __func__, ap);
        goto invalid;
    }

    addr = rb & TLBIE_RB_EPN_MASK & ~pgoffs_mask;

    if (local) {
        tlb_flush_page(env_cpu(env), addr);
    } else {
        tlb_flush_page_all_cpus(env_cpu(env), addr);
    }
    return;

inval_all:
    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    if (!local) {
        env->tlb_need_flush |= TLB_NEED_GLOBAL_FLUSH;
    }
    return;

invalid:
    raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                           POWERPC_EXCP_INVAL |
                           POWERPC_EXCP_INVAL_INVAL, GETPC());
}

#endif
555
/* tlbiva: BookE-only TLB invalidate by virtual address (unimplemented). */
void helper_tlbiva(CPUPPCState *env, target_ulong addr)
{
    /* tlbiva instruction only exists on BookE */
    assert(env->mmu_model == POWERPC_MMU_BOOKE);
    /* XXX: TODO */
    cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
}
563
564 /* Software driven TLBs management */
565 /* PowerPC 602/603 software TLB load instructions helpers */
/*
 * Common part of the 602/603 software TLB load helpers: gather the PTE
 * words from the RPA and xCMP/xMISS SPRs and install them in the TLB
 * way selected by SRR1.
 */
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong rpn, cmp, miss;
    int way;

    rpn = env->spr[SPR_RPA];
    if (is_code) {
        cmp = env->spr[SPR_ICMP];
        miss = env->spr[SPR_IMISS];
    } else {
        cmp = env->spr[SPR_DCMP];
        miss = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)miss; /* only referenced by the trace line below */
    qemu_log_mask(CPU_LOG_MMU, "%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx
                  " PTE0 " TARGET_FMT_lx " PTE1 " TARGET_FMT_lx " way %d\n",
                  __func__, new_EPN, miss, cmp, rpn, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, cmp, rpn);
}
588
/* tlbld: software load of a data TLB entry. */
void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}
593
/* tlbli: software load of an instruction TLB entry. */
void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}
598
599 /*****************************************************************************/
600 /* PowerPC 601 specific instructions (POWER bridge) */
601
/*
 * POWER bridge "rac": translate @addr to a real address with BATs
 * temporarily disabled; returns 0 when translation fails.
 */
target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
{
    mmu_ctx_t ctx;
    int saved_nb_BATs;
    target_ulong ra = 0;

    /*
     * We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     *
     * XXX: FIX THIS: Pretend we have no BAT
     */
    saved_nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address_wtlb(env, &ctx, addr, 0, ACCESS_INT, 0) == 0) {
        ra = ctx.raddr;
    }
    env->nb_BATs = saved_nb_BATs;
    return ra;
}
622
booke_tlb_to_page_size(int size)623 static inline target_ulong booke_tlb_to_page_size(int size)
624 {
625 return 1024 << (2 * size);
626 }
627
/*
 * Map a page size in bytes back to its BookE TSIZE encoding, or -1 when
 * the size has no valid encoding.
 */
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    switch (page_size) {
    case 0x00000400UL:
        return 0x0;
    case 0x00001000UL:
        return 0x1;
    case 0x00004000UL:
        return 0x2;
    case 0x00010000UL:
        return 0x3;
    case 0x00040000UL:
        return 0x4;
    case 0x00100000UL:
        return 0x5;
    case 0x00400000UL:
        return 0x6;
    case 0x01000000UL:
        return 0x7;
    case 0x04000000UL:
        return 0x8;
    case 0x10000000UL:
        return 0x9;
    case 0x40000000UL:
        return 0xA;
#if defined(TARGET_PPC64)
    case 0x000100000000ULL:
        return 0xB;
    case 0x000400000000ULL:
        return 0xC;
    case 0x001000000000ULL:
        return 0xD;
    case 0x004000000000ULL:
        return 0xE;
    case 0x010000000000ULL:
        return 0xF;
#endif
    default:
        return -1;
    }
}
690
/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00

/*
 * Update the 40x PID SPR; a changed PID selects a different address
 * space, so a deferred local TLB flush is requested.
 */
void helper_store_40x_pid(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_40x_PID] != val) {
        env->spr[SPR_40x_PID] = val;
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    }
}
714
/*
 * tlbre word 0 (TLBHI) for 40x: return the EPN, size encoding and valid
 * bit of TLB entry @entry.  Side effect: the entry's PID is loaded into
 * SPR_40x_PID via helper_store_40x_pid().
 */
target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    /* Clamp unencodable sizes to the default encoding */
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    helper_store_40x_pid(env, tlb->PID);
    return ret;
}
735
/* tlbre word 1 (TLBLO) for 40x: return the RPN and EX/WR bits. */
target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb = &env->tlb.tlbe[entry & PPC4XX_TLB_ENTRY_MASK];
    target_ulong word = tlb->RPN;

    word |= (tlb->prot & PAGE_EXEC) ? PPC4XX_TLBLO_EX : 0;
    word |= (tlb->prot & PAGE_WRITE) ? PPC4XX_TLBLO_WR : 0;
    return word;
}
752
/*
 * Flush the QEMU TLB range covered by an embedded (4xx/440) TLB entry.
 * The MMU index set is derived mechanically from the entry: bit 0 when
 * any low-nibble prot bits are set, bit 1 when any high-nibble prot
 * bits are set, shifted left by two when attribute bit 0 is set.
 * NOTE(review): the nibbles appear to correspond to the two privilege
 * levels and attr bit 0 to the address-space bit -- confirm against the
 * mmu_idx layout used by the BookE translation code.
 */
static void ppcemb_tlb_flush(CPUState *cs, ppcemb_tlb_t *tlb)
{
    unsigned mmu_idx = 0;

    if (tlb->prot & 0xf) {
        mmu_idx |= 0x1;
    }
    if ((tlb->prot >> 4) & 0xf) {
        mmu_idx |= 0x2;
    }
    if (tlb->attr & 1) {
        mmu_idx <<= 2;
    }

    tlb_flush_range_by_mmuidx(cs, tlb->EPN, tlb->size, mmu_idx,
                              TARGET_LONG_BITS);
}
770
/*
 * tlbwe word 0 (TLBHI) for 40x: set the EPN, page size and valid bit of
 * TLB entry @entry from @val.  A previously valid mapping for the
 * current PID is flushed from the QEMU TLB first, and the entry's PID
 * is taken from the current SPR_40x_PID.
 */
void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %d val " TARGET_FMT_lx "\n",
                  __func__, (int)entry,
                  val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(cs, tlb);
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /*
     * We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n"
                  "Please implement TARGET_PAGE_BITS_VARY\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    /* Align the EPN to the (power-of-two) entry size */
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(cs,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
                  " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx
                  " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
822
/*
 * tlbwe word 1 (TLBLO) for 40x: set the RPN, attributes and permission
 * bits of TLB entry @entry from @val.  A previously valid mapping for
 * the current PID is flushed from the QEMU TLB first.  PAGE_READ is
 * always granted; EX and WR come from the TLBLO bits.
 */
void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %i val " TARGET_FMT_lx "\n",
                  __func__, (int)entry, val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(cs, tlb);
    }
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
                  " EPN " TARGET_FMT_lx
                  " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
858
/* tlbsx for 40x: search the TLB for @address under the current PID. */
target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
863
mmubooke_pid_match(CPUPPCState * env,ppcemb_tlb_t * tlb)864 static bool mmubooke_pid_match(CPUPPCState *env, ppcemb_tlb_t *tlb)
865 {
866 if (tlb->PID == env->spr[SPR_BOOKE_PID]) {
867 return true;
868 }
869 if (!env->nb_pids) {
870 return false;
871 }
872
873 if (env->spr[SPR_BOOKE_PID1] && tlb->PID == env->spr[SPR_BOOKE_PID1]) {
874 return true;
875 }
876 if (env->spr[SPR_BOOKE_PID2] && tlb->PID == env->spr[SPR_BOOKE_PID2]) {
877 return true;
878 }
879
880 return false;
881 }
882
883 /* PowerPC 440 TLB management */
/* PowerPC 440 TLB management */

/*
 * 440 tlbwe: write one of the three words of TLB entry @entry.
 * Word 0 sets EPN/size/valid (PID taken from MMUCR), word 1 the RPN,
 * word 2 the attributes and the user/supervisor permission nibbles.
 * Any previously valid mapping matching an active PID is flushed from
 * the QEMU TLB first.
 */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s word %d entry %d value " TARGET_FMT_lx "\n",
                  __func__, word, (int)entry, value);
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];

    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && mmubooke_pid_match(env, tlb)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(env_cpu(env), tlb);
    }

    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        tlb->EPN = value & 0xFFFFFC00;
        tlb->size = booke_tlb_to_page_size((value >> 4) & 0xF);
        /* Keep only bit 8 of the value as attribute bit 0 */
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            tlb->prot &= ~PAGE_VALID;
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        break;
    case 1:
        tlb->RPN = value & 0xFFFFFC0F;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        /* Low permission nibble goes to the high prot nibble... */
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        /* ...and the high nibble to the low prot nibble */
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}
944
/*
 * 440 tlbre: read back one of the three words of TLB entry @entry,
 * inverting the encoding performed by helper_440_tlbwe().  Reading
 * word 0 also loads the entry's PID into MMUCR[TID].
 */
target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        /* Unencodable sizes read back as encoding 1 */
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}
1000
/* 440 tlbsx: search the TLB for @address under the MMUCR TID field. */
target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
1005
1006 /* PowerPC BookE 2.06 TLB management */
1007
/*
 * Return the BookE 2.06 TLB entry currently selected by the MAS
 * registers: TLBSEL and ESEL from MAS0 plus the EPN from MAS2.
 * Hardware entry selection (HES) is not implemented and aborts.
 * NOTE(review): booke206_get_tlbm() may return NULL for out-of-range
 * selections -- callers are expected to check (see helper_booke206_tlbwe).
 */
static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env_cpu(env), "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}
1024
/* Update a BookE PID SPR and flush, since the address space changed. */
void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs mean we're in a different address space now */
    tlb_flush(env_cpu(env));
}
1031
/* Update EPLC and flush the external-PID load MMU context. */
void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
}
/* Update EPSC and flush the external-PID store MMU context. */
void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
}
1042
/* Drop a BookE 2.06 entry from the QEMU TLB: single pages are flushed
 * precisely, larger entries force a full flush. */
static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    if (booke206_tlb_to_page_size(env, tlb) != TARGET_PAGE_SIZE) {
        tlb_flush(env_cpu(env));
    } else {
        tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
    }
}
1051
/*
 * tlbwe (BookE 2.06): write the TLB entry selected by MAS0/MAS2 from
 * the MAS1/MAS2/MAS3/MAS7 registers.  Raises a program interrupt for
 * an invalid entry selection or an unsupported page size.
 */
void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;


    /* MAS0[WQ] qualifies whether/how the write happens. */
    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    /* LRAT writes are only legal from the hypervisor (MSR[GS]=0). */
    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !FIELD_EX64(env->msr, MSR, GS)) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    /* Out-of-range selection: illegal instruction per the architecture. */
    if (!tlb) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* Guest-state tlbwe would need hypervisor mediation; not implemented. */
    if (FIELD_EX64(env->msr, MSR, GS)) {
        cpu_abort(env_cpu(env), "missing HV implementation\n");
    }

    if (tlb->mas1 & MAS1_VALID) {
        /*
         * Invalidate the page in QEMU TLB if it was a valid entry.
         *
         * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
         * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
         * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
         *
         * "Note that when an L2 TLB entry is written, it may be displacing an
         * already valid entry in the same L2 TLB location (a victim). If a
         * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
         * TLB entry is automatically invalidated."
         */
        flush_page(env, tlb);
    }

    /* RPN lives split across MAS7 (high 32 bits) and MAS3 (low 32 bits). */
    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
                  env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
        /* For TLB which has a fixed size TSIZE is ignored with MAV2 */
        booke206_fixed_size_tlbn(env, tlbn, tlb);
    } else {
        if (!(tlbncfg & TLBnCFG_AVAIL)) {
            /* force !AVAIL TLB entries to correct page size */
            tlb->mas1 &= ~MAS1_TSIZE_MASK;
            /* XXX can be configured in MMUCSR0 */
            tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
        }
    }

    /* Make a mask from TLB size to discard invalid bits in EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    if (!FIELD_EX64(env->msr, MSR, CM)) {
        /*
         * Executing a tlbwe instruction in 32-bit mode will set bits
         * 0:31 of the TLB EPN field to zero.
         */
        mask &= 0xffffffff;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    /* The new mapping may shadow stale QEMU TLB contents; drop them. */
    flush_page(env, tlb);
}
1164
/*
 * Export a TLB entry back into the MAS registers (used by tlbre/tlbsx):
 * MAS0 identifies where the entry lives, MAS1/MAS2/MAS3/MAS7 carry its
 * contents.
 */
static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);
    target_ulong mas0;

    /* Compose MAS0 from the entry's location plus the next-victim hint. */
    mas0 = tlbn << MAS0_TLBSEL_SHIFT;
    mas0 |= way << MAS0_ESEL_SHIFT;
    mas0 |= env->last_way << MAS0_NV_SHIFT;
    env->spr[SPR_BOOKE_MAS0] = mas0;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    /* mas7_3 stores the RPN as one 64-bit value; split it back out. */
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}
1179
/* tlbre: read the entry selected by MAS0/MAS2 into the MAS registers. */
void helper_booke206_tlbre(CPUPPCState *env)
{
    ppcmas_tlb_t *tlb = booke206_cur_tlb(env);

    if (tlb) {
        booke206_tlb_to_mas(env, tlb);
    } else {
        /* Invalid selection: report an invalid entry (MAS1[V] = 0). */
        env->spr[SPR_BOOKE_MAS1] = 0;
    }
}
1191
/*
 * tlbsx: search every TLB array for a match on 'address', the search
 * PID (MAS6[SPID]) and the search address space (MAS6[SAS]).  On a hit
 * the matching entry is exported to the MAS registers; on a miss the
 * MAS registers are loaded with defaults from MAS4/MAS6 so that a
 * subsequent tlbwe can install a fresh entry.
 */
void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    /* Walk every way of every TLB array looking for a hit. */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!tlb) {
                continue;
            }

            /* Non-zero means no match on address/PID/validity. */
            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            /* The entry's address space (TS) must equal the searched one. */
            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    /* Propagate the searched address space into the default entry. */
    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    /* Seed MAS1[TID] from MAS6[SPID] (SPID occupies the upper half). */
    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
        << MAS1_TID_SHIFT;

    /* next victim logic */
    /* NOTE(review): victim rotation uses TLB0's way count even though the
     * defaults may select another array via MAS4[TLBSELD] — confirm. */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
1245
/*
 * Clear the valid bit of every non-protected entry in TLB array 'tlbn'
 * whose page contains the effective address 'ea'.  Entries with IPROT
 * set are immune to invalidation by design.
 */
static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              vaddr ea)
{
    int nways = booke206_tlb_ways(env, tlbn);
    int way;

    for (way = 0; way < nways; way++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, way);
        target_ulong epn_mask;

        if (!tlb || (tlb->mas1 & MAS1_IPROT)) {
            continue;
        }
        /* Compare only the EPN bits significant for this entry's size. */
        epn_mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if ((tlb->mas2 & MAS2_EPN_MASK) == (ea & epn_mask)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}
1265
/*
 * tlbivax: invalidate TLB entries by effective address.  Bit 3 of the
 * EA selects TLB1 over TLB0, bit 2 requests invalidate-all for the
 * selected array.
 */
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    CPUState *cs;
    bool tlb1 = (address & 0x8) != 0;

    if (address & 0x4) {
        /* Invalidate-all for the selected array. */
        booke206_flush_tlb(env, tlb1 ? BOOKE206_FLUSH_TLB1
                                     : BOOKE206_FLUSH_TLB0,
                           tlb1 ? 1 : 0);
        return;
    }

    if (tlb1) {
        /*
         * TLB1 entries can be larger than one page, so a precise
         * per-page flush is not enough — flush everything on all CPUs.
         */
        booke206_invalidate_ea_tlb(env, 1, address);
        CPU_FOREACH(cs) {
            tlb_flush(cs);
        }
    } else {
        /* TLB0 entries are single pages: a targeted flush suffices. */
        booke206_invalidate_ea_tlb(env, 0, address);
        CPU_FOREACH(cs) {
            tlb_flush_page(cs, address & MAS2_EPN_MASK);
        }
    }
}
1296
/*
 * tlbilx T=0: invalidate every non-protected entry in all TLB arrays.
 * 'address' is unused in this variant.
 */
void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}
1302
/*
 * tlbilx T=1: invalidate every non-protected entry whose TID matches
 * the PID searched for in MAS6[SPID].  'address' is unused in this
 * variant.
 */
void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            /*
             * Match the entry's TID field against MAS6[SPID] directly;
             * assumes MAS1[TID] and MAS6[SPID] share the same bit
             * position — NOTE(review): confirm against the MAS layouts.
             */
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        /* Advance to the next array; reuse the size computed above. */
        tlb += tlb_size;
    }
    tlb_flush(env_cpu(env));
}
1323
/*
 * tlbilx T=3: invalidate the entry (if any) matching the given
 * effective address together with the search criteria in MAS5/MAS6:
 * PID, guest space (SGS), indirect flag (SIND) and — under MAV2 —
 * the searched page size.
 */
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    /* Hard-wired false, so the TSIZE filter below is currently dead code. */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            /* Skip on address/PID mismatch, IPROT, or IND/SGS mismatch. */
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}
1363
/*
 * Flush the software TLB arrays selected by 'type': bit 1 selects
 * TLB1, bit 2 selects TLB0.  IPROT-protected entries survive (flag
 * argument 1 to booke206_flush_tlb).
 */
void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = ((type & 2) ? BOOKE206_FLUSH_TLB1 : 0) |
                ((type & 4) ? BOOKE206_FLUSH_TLB0 : 0);

    booke206_flush_tlb(env, flags, 1);
}
1378
1379
/* Process any pending CPU-local TLB flush request (global=false). */
void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}
1384
/* Process any pending TLB flush request, including global ones. */
void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}
1389
1390
/*
 * TCG tlb_fill hook: translate 'eaddr' for 'access_type' in MMU
 * context 'mmu_idx' and install the mapping in the QEMU TLB.
 *
 * Returns true on success.  On failure: returns false if 'probe' is
 * set, otherwise raises the guest exception that ppc_xlate left in
 * cs->exception_index / env->error_code (so control never returns
 * to the caller in that case — presumably raise_exception_err_ra is
 * noreturn; confirm against its declaration).
 */
bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int page_size, prot;

    /* !probe asks ppc_xlate to record fault state for a real exception. */
    if (ppc_xlate(cpu, eaddr, access_type, &raddr,
                  &page_size, &prot, mmu_idx, !probe)) {
        /* page_size is log2 here: 1UL << page_size is the byte size. */
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     prot, mmu_idx, 1UL << page_size);
        return true;
    }
    if (probe) {
        return false;
    }
    raise_exception_err_ra(&cpu->env, cs->exception_index,
                           cpu->env.error_code, retaddr);
}
1411