1 /*
2  *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *  Copyright (c) 2013 David Gibson, IBM Corporation
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "qemu/error-report.h"
25 #include "qemu/qemu-print.h"
26 #include "sysemu/hw_accel.h"
27 #include "kvm_ppc.h"
28 #include "mmu-hash64.h"
29 #include "exec/log.h"
30 #include "hw/hw.h"
31 #include "internal.h"
32 #include "mmu-book3s-v3.h"
33 #include "helper_regs.h"
34 
35 #ifdef CONFIG_TCG
36 #include "exec/helper-proto.h"
37 #endif
38 
39 /* #define DEBUG_SLB */
40 
41 #ifdef DEBUG_SLB
42 #  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
43 #else
44 #  define LOG_SLB(...) do { } while (0)
45 #endif
46 
47 /*
48  * SLB handling
49  */
50 
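/*
 * Find the SLB entry that translates effective address @eaddr, trying
 * both the 256M and 1T segment encodings of the ESID.  Returns NULL if
 * no valid entry covers the address.
 */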
51 static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
52 {
53     CPUPPCState *env = &cpu->env;
54     uint64_t esid_256M, esid_1T;
55     int n;
56 
57     LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
58 
59     esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
60     esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
61 
62     for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
63         ppc_slb_t *slb = &env->slb[n];
64 
65         LOG_SLB("%s: slot %d %016" PRIx64 " %016"
66                     PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
67         /*
68          * We check for 1T matches on all MMUs here - if the MMU
69          * doesn't have 1T segment support, we will have prevented 1T
70          * entries from being inserted in the slbmte code.
71          */
72         if (((slb->esid == esid_256M) &&
73              ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
74             || ((slb->esid == esid_1T) &&
75                 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
76             return slb;
77         }
78     }
79 
80     return NULL;
81 }
82 
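/*
 * Debug helper: print every SLB entry that is not entirely zero
 * (used, for instance, when dumping MMU state from the monitor).
 */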
83 void dump_slb(PowerPCCPU *cpu)
84 {
85     CPUPPCState *env = &cpu->env;
86     int i;
87     uint64_t slbe, slbv;
88 
89     cpu_synchronize_state(CPU(cpu));
90 
91     qemu_printf("SLB\tESID\t\t\tVSID\n");
92     for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
93         slbe = env->slb[i].esid;
94         slbv = env->slb[i].vsid;
95         if (slbe == 0 && slbv == 0) {
96             continue;
97         }
98         qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
99                     i, slbe, slbv);
100     }
101 }
102 
103 #ifdef CONFIG_TCG
104 void helper_slbia(CPUPPCState *env, uint32_t ih)
105 {
106     PowerPCCPU *cpu = env_archcpu(env);
107     int starting_entry;
108     int n;
109 
110     /*
111      * slbia must always flush the whole TLB (which is equivalent to the ERAT
112      * in the PPC architecture). Matching on SLB_ESID_V is not good enough,
113      * because slbmte can overwrite a valid SLB entry without flushing its
114      * lookaside information.
115      *
116      * It would be possible to keep the TLB in sync with the SLB by flushing
117      * when a valid entry is overwritten by slbmte, so that slbia would only
118      * have to flush when it evicts a valid SLB entry. However, slbmte is
119      * expected to be more common than slbia, and slbia usually evicts valid
120      * SLB entries anyway, so that tradeoff is unlikely to be a good one.
121      *
122      * ISA v2.05 introduced the IH field with the values 0, 1, 2 and 6. These
123      * all invalidate the same SLB entries (everything but entry 0), but they
124      * differ in what "lookaside information" is invalidated. TCG can ignore
125      * this and flush everything.
126      *
127      * ISA v3.0 introduced the additional values 3, 4 and 7, which change which
128      * SLB entries are invalidated.
129      */
130 
131     env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
132 
133     starting_entry = 1; /* default for IH=0,1,2,6 */
134 
135     if (env->mmu_model == POWERPC_MMU_3_00) {
136         switch (ih) {
137         case 0x7:
138             /* invalidate no SLBs, but all lookaside information */
139             return;
140 
141         case 0x3:
142         case 0x4:
143             /* also considers SLB entry 0 */
144             starting_entry = 0;
145             break;
146 
147         case 0x5:
148             /* treat undefined values as ih==0, and warn */
149             qemu_log_mask(LOG_GUEST_ERROR,
150                           "slbia undefined IH field %u.\n", ih);
151             break;
152 
153         default:
154             /* 0,1,2,6 */
155             break;
156         }
157     }
158 
159     for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
160         ppc_slb_t *slb = &env->slb[n];
161 
162         if (!(slb->esid & SLB_ESID_V)) {
163             continue;
164         }
165         if (env->mmu_model == POWERPC_MMU_3_00) {
166             if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
167                 /* preserves entries with a class value of 0 */
168                 continue;
169             }
170         }
171 
172         slb->esid &= ~SLB_ESID_V;
173     }
174 }
175 
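/*
 * Common implementation of slbie and slbieg: invalidate the SLB entry
 * translating @addr (if any) and request a local or global TLB flush
 * depending on @global.
 */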
176 static void __helper_slbie(CPUPPCState *env, target_ulong addr,
177                            target_ulong global)
178 {
179     PowerPCCPU *cpu = env_archcpu(env);
180     ppc_slb_t *slb;
181 
182     slb = slb_lookup(cpu, addr);
183     if (!slb) {
184         return;
185     }
186 
187     if (slb->esid & SLB_ESID_V) {
188         slb->esid &= ~SLB_ESID_V;
189 
190         /*
191          * XXX: given that the segment size is 256 MiB or 1 TiB and we
192          *      still don't have a tlb_flush_mask(env, n, mask) in QEMU,
193          *      we just invalidate all TLBs
194          */
195         env->tlb_need_flush |=
196             (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
197     }
198 }
199 
200 void helper_slbie(CPUPPCState *env, target_ulong addr)
201 {
202     __helper_slbie(env, addr, false);
203 }
204 
205 void helper_slbieg(CPUPPCState *env, target_ulong addr)
206 {
207     __helper_slbie(env, addr, true);
208 }
209 #endif
210 
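/*
 * Validate and install an SLB entry.  Returns 0 on success, or -1 if
 * the slot number, the reserved ESID bits, the segment size or the
 * page size encoding is invalid for this CPU.
 */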
211 int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
212                   target_ulong esid, target_ulong vsid)
213 {
214     CPUPPCState *env = &cpu->env;
215     ppc_slb_t *slb = &env->slb[slot];
216     const PPCHash64SegmentPageSizes *sps = NULL;
217     int i;
218 
219     if (slot >= cpu->hash64_opts->slb_size) {
220         return -1; /* Bad slot number */
221     }
222     if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
223         return -1; /* Reserved bits set */
224     }
225     if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
226         return -1; /* Bad segment size */
227     }
228     if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
229         return -1; /* 1T segment on MMU that doesn't support it */
230     }
231 
232     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
233         const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
234 
235         if (!sps1->page_shift) {
236             break;
237         }
238 
239         if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
240             sps = sps1;
241             break;
242         }
243     }
244 
245     if (!sps) {
246         error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
247                      " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
248                      slot, esid, vsid);
249         return -1;
250     }
251 
252     slb->esid = esid;
253     slb->vsid = vsid;
254     slb->sps = sps;
255 
256     LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
257             " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
258             slb->esid, slb->vsid);
259 
260     return 0;
261 }
262 
263 #ifdef CONFIG_TCG
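/*
 * SLB read-back helpers: fetch the raw ESID or VSID of a numbered
 * slot, or look up a VSID by effective address.  These implement the
 * helper_load_slb_esid/vsid and helper_find_slb_vsid entry points
 * further down.
 */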
264 static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
265                              target_ulong *rt)
266 {
267     CPUPPCState *env = &cpu->env;
268     int slot = rb & 0xfff;
269     ppc_slb_t *slb = &env->slb[slot];
270 
271     if (slot >= cpu->hash64_opts->slb_size) {
272         return -1;
273     }
274 
275     *rt = slb->esid;
276     return 0;
277 }
278 
279 static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
280                              target_ulong *rt)
281 {
282     CPUPPCState *env = &cpu->env;
283     int slot = rb & 0xfff;
284     ppc_slb_t *slb = &env->slb[slot];
285 
286     if (slot >= cpu->hash64_opts->slb_size) {
287         return -1;
288     }
289 
290     *rt = slb->vsid;
291     return 0;
292 }
293 
294 static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
295                              target_ulong *rt)
296 {
297     CPUPPCState *env = &cpu->env;
298     ppc_slb_t *slb;
299 
300     if (!msr_is_64bit(env, env->msr)) {
301         rb &= 0xffffffff;
302     }
303     slb = slb_lookup(cpu, rb);
304     if (slb == NULL) {
305         *rt = (target_ulong)-1ul;
306     } else {
307         *rt = slb->vsid;
308     }
309     return 0;
310 }
311 
312 void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
313 {
314     PowerPCCPU *cpu = env_archcpu(env);
315 
316     if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
317         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
318                                POWERPC_EXCP_INVAL, GETPC());
319     }
320 }
321 
322 target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
323 {
324     PowerPCCPU *cpu = env_archcpu(env);
325     target_ulong rt = 0;
326 
327     if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
328         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
329                                POWERPC_EXCP_INVAL, GETPC());
330     }
331     return rt;
332 }
333 
334 target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
335 {
336     PowerPCCPU *cpu = env_archcpu(env);
337     target_ulong rt = 0;
338 
339     if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
340         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
341                                POWERPC_EXCP_INVAL, GETPC());
342     }
343     return rt;
344 }
345 
346 target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
347 {
348     PowerPCCPU *cpu = env_archcpu(env);
349     target_ulong rt = 0;
350 
351     if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
352         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
353                                POWERPC_EXCP_INVAL, GETPC());
354     }
355     return rt;
356 }
357 #endif
358 
359 /* Check No-Execute or Guarded Storage */
360 static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
361                                               ppc_hash_pte64_t pte)
362 {
363     /* Exec permissions CANNOT take away read or write permissions */
364     return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
365             PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
366 }
367 
368 /* Check Basic Storage Protection */
369 static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
370                                ppc_slb_t *slb, ppc_hash_pte64_t pte)
371 {
372     CPUPPCState *env = &cpu->env;
373     unsigned pp, key;
374     /*
375      * Some pp bit combinations have undefined behaviour, so default
376      * to no access in those cases
377      */
378     int prot = 0;
379 
380     key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
381              : (slb->vsid & SLB_VSID_KS));
382     pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
383 
384     if (key == 0) {
385         switch (pp) {
386         case 0x0:
387         case 0x1:
388         case 0x2:
389             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
390             break;
391 
392         case 0x3:
393         case 0x6:
394             prot = PAGE_READ | PAGE_EXEC;
395             break;
396         }
397     } else {
398         switch (pp) {
399         case 0x0:
400         case 0x6:
401             break;
402 
403         case 0x1:
404         case 0x3:
405             prot = PAGE_READ | PAGE_EXEC;
406             break;
407 
408         case 0x2:
409             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
410             break;
411         }
412     }
413 
414     return prot;
415 }
416 
417 /* Check the instruction access permissions specified in the IAMR */
418 static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
419 {
420     CPUPPCState *env = &cpu->env;
421     int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;
422 
423     /*
424      * An instruction fetch is permitted if the IAMR bit is 0.
425      * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
426      * can only take away EXEC permission, not READ or WRITE permission.
427      * If the bit is clear, return PAGE_READ | PAGE_WRITE | PAGE_EXEC
428      * since EXEC permission is allowed.
429      */
430     return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
431                                PAGE_READ | PAGE_WRITE | PAGE_EXEC;
432 }
433 
434 static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
435 {
436     CPUPPCState *env = &cpu->env;
437     int key, amrbits;
438     int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
439 
440     /* Only recent MMUs implement Virtual Page Class Key Protection */
441     if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
442         return prot;
443     }
444 
445     key = HPTE64_R_KEY(pte.pte1);
446     amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;
447 
448     /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
449     /*         env->spr[SPR_AMR]); */
450 
451     /*
452      * A store is permitted if the AMR bit is 0. Remove write
453      * protection if it is set.
454      */
455     if (amrbits & 0x2) {
456         prot &= ~PAGE_WRITE;
457     }
458     /*
459      * A load is permitted if the AMR bit is 0. Remove read
460      * protection if it is set.
461      */
462     if (amrbits & 0x1) {
463         prot &= ~PAGE_READ;
464     }
465 
466     switch (env->mmu_model) {
467     /*
468      * MMU version 2.07 and later support the IAMR.
469      * Check whether the IAMR allows the instruction access: the mask
470      * returned by ppc_hash64_iamr_prot() omits PAGE_EXEC if it does not
471      * (clearing that bit from prot) and includes it if it does.
472      */
473     case POWERPC_MMU_2_07:
474     case POWERPC_MMU_3_00:
475         prot &= ppc_hash64_iamr_prot(cpu, key);
476         break;
477     default:
478         break;
479     }
480 
481     return prot;
482 }
483 
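/*
 * Map @n HPTEs starting at index @ptex for direct reading.  For a
 * virtual hypervisor the mapping is delegated to its map_hptes hook;
 * otherwise the guest hash table in memory is mapped.  Each call must
 * be paired with ppc_hash64_unmap_hptes().
 */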
484 const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
485                                              hwaddr ptex, int n)
486 {
487     hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
488     hwaddr base;
489     hwaddr plen = n * HASH_PTE_SIZE_64;
490     const ppc_hash_pte64_t *hptes;
491 
492     if (cpu->vhyp) {
493         PPCVirtualHypervisorClass *vhc =
494             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
495         return vhc->map_hptes(cpu->vhyp, ptex, n);
496     }
497     base = ppc_hash64_hpt_base(cpu);
498 
499     if (!base) {
500         return NULL;
501     }
502 
503     hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
504                               MEMTXATTRS_UNSPECIFIED);
505     if (plen < (n * HASH_PTE_SIZE_64)) {
506         hw_error("%s: Unable to map all requested HPTEs\n", __func__);
507     }
508     return hptes;
509 }
510 
511 void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
512                             hwaddr ptex, int n)
513 {
514     if (cpu->vhyp) {
515         PPCVirtualHypervisorClass *vhc =
516             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
517         vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
518         return;
519     }
520 
521     address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
522                         false, n * HASH_PTE_SIZE_64);
523 }
524 
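/*
 * Return the page shift (log2 of the page size) encoded by a HPTE,
 * given the page sizes allowed by the owning segment, or 0 if the
 * encoding does not match any of them.
 */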
525 static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
526                                 uint64_t pte0, uint64_t pte1)
527 {
528     int i;
529 
530     if (!(pte0 & HPTE64_V_LARGE)) {
531         if (sps->page_shift != 12) {
532             /* 4kiB page in a non 4kiB segment */
533             return 0;
534         }
535         /* Normal 4kiB page */
536         return 12;
537     }
538 
539     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
540         const PPCHash64PageSize *ps = &sps->enc[i];
541         uint64_t mask;
542 
543         if (!ps->page_shift) {
544             break;
545         }
546 
547         if (ps->page_shift == 12) {
548             /* L bit is set so this can't be a 4kiB page */
549             continue;
550         }
551 
552         mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
553 
554         if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
555             return ps->page_shift;
556         }
557     }
558 
559     return 0; /* Bad page size encoding */
560 }
561 
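/*
 * ISA v3.0 moved the segment size (B) field from the first to the
 * second doubleword of the HPTE; convert such an entry back to the
 * pre-3.0 layout so the common lookup code only deals with one format.
 */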
562 static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
563 {
564     /* Insert B into pte0 */
565     *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
566             ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
567              (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));
568 
569     /* Remove B from pte1 */
570     *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
571 }
572 
573 
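/*
 * Search one PTE group for an entry matching @ptem.  On a match the
 * entry is copied to @pte, its page shift to @pshift, and its index is
 * returned; -1 is returned if the group contains no matching entry.
 */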
574 static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
575                                      const PPCHash64SegmentPageSizes *sps,
576                                      target_ulong ptem,
577                                      ppc_hash_pte64_t *pte, unsigned *pshift)
578 {
579     int i;
580     const ppc_hash_pte64_t *pteg;
581     target_ulong pte0, pte1;
582     target_ulong ptex;
583 
584     ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
585     pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
586     if (!pteg) {
587         return -1;
588     }
589     for (i = 0; i < HPTES_PER_GROUP; i++) {
590         pte0 = ppc_hash64_hpte0(cpu, pteg, i);
591         /*
592          * pte0 contains the valid bit and must be read before pte1,
593          * otherwise we might see an old pte1 with a new valid bit and
594          * thus an inconsistent hpte value
595          */
596         smp_rmb();
597         pte1 = ppc_hash64_hpte1(cpu, pteg, i);
598 
599         /* Convert format if necessary */
600         if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
601             ppc64_v3_new_to_old_hpte(&pte0, &pte1);
602         }
603 
604         /* This compares V, B, H (secondary) and the AVPN */
605         if (HPTE64_V_COMPARE(pte0, ptem)) {
606             *pshift = hpte_page_shift(sps, pte0, pte1);
607             /*
608              * If there is no match, ignore the PTE; it could simply
609              * be for a different segment size encoding, and the
610              * architecture specifies that we should not match. Linux
611              * can leave behind PTEs for the wrong base page size
612              * when demoting segments.
613              */
614             if (*pshift == 0) {
615                 continue;
616             }
617             /*
618              * We don't do anything with pshift yet, as the QEMU TLB
619              * only deals with 4K pages anyway
620              */
621             pte->pte0 = pte0;
622             pte->pte1 = pte1;
623             ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
624             return ptex + i;
625         }
626     }
627     ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
628     /*
629      * We didn't find a valid entry.
630      */
631     return -1;
632 }
633 
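/*
 * Full hash table lookup for @eaddr under SLB entry @slb: compute the
 * primary hash, search the primary PTE group, then retry the secondary
 * group with the complemented hash.  Returns the PTE index, filling in
 * @pte and @pshift, or -1 if nothing matches.
 */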
634 static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
635                                      ppc_slb_t *slb, target_ulong eaddr,
636                                      ppc_hash_pte64_t *pte, unsigned *pshift)
637 {
638     CPUPPCState *env = &cpu->env;
639     hwaddr hash, ptex;
640     uint64_t vsid, epnmask, epn, ptem;
641     const PPCHash64SegmentPageSizes *sps = slb->sps;
642 
643     /*
644      * The SLB store path should prevent any bad page size encodings
645      * from getting in here, so:
646      */
647     assert(sps);
648 
649     /* If ISL is set in LPCR we need to clamp the page size to 4K */
650     if (env->spr[SPR_LPCR] & LPCR_ISL) {
651         /* We assume that when using TCG, 4K is the first entry of sps */
652         sps = &cpu->hash64_opts->sps[0];
653         assert(sps->page_shift == 12);
654     }
655 
656     epnmask = ~((1ULL << sps->page_shift) - 1);
657 
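    /*
     * The primary hash is the VSID xored with the page index within the
     * segment: for a 256M segment with 4K pages this is vsid ^ (epn >> 12),
     * while 1T segments additionally fold in vsid << 25.  The secondary
     * hash used for the second PTEG search below is the complement, ~hash.
     */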
658     if (slb->vsid & SLB_VSID_B) {
659         /* 1TB segment */
660         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
661         epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
662         hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
663     } else {
664         /* 256M segment */
665         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
666         epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
667         hash = vsid ^ (epn >> sps->page_shift);
668     }
669     ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
670     ptem |= HPTE64_V_VALID;
671 
672     /* Page address translation */
673     qemu_log_mask(CPU_LOG_MMU,
674             "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
675             " hash " TARGET_FMT_plx "\n",
676             ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);
677 
678     /* Primary PTEG lookup */
679     qemu_log_mask(CPU_LOG_MMU,
680             "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
681             " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
682             " hash=" TARGET_FMT_plx "\n",
683             ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
684             vsid, ptem,  hash);
685     ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
686 
687     if (ptex == -1) {
688         /* Secondary PTEG lookup */
689         ptem |= HPTE64_V_SECONDARY;
690         qemu_log_mask(CPU_LOG_MMU,
691                 "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
692                 " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
693                 " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
694                 ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);
695 
696         ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
697     }
698 
699     return ptex;
700 }
701 
702 unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
703                                           uint64_t pte0, uint64_t pte1)
704 {
705     int i;
706 
707     if (!(pte0 & HPTE64_V_LARGE)) {
708         return 12;
709     }
710 
711     /*
712      * The encodings in cpu->hash64_opts->sps need to be carefully
713      * chosen so that this gives an unambiguous result.
714      */
715     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
716         const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
717         unsigned shift;
718 
719         if (!sps->page_shift) {
720             break;
721         }
722 
723         shift = hpte_page_shift(sps, pte0, pte1);
724         if (shift) {
725             return shift;
726         }
727     }
728 
729     return 0;
730 }
731 
732 static bool ppc_hash64_use_vrma(CPUPPCState *env)
733 {
734     switch (env->mmu_model) {
735     case POWERPC_MMU_3_00:
736         /*
737          * ISA v3.0 (POWER9) always uses VRMA; the VPM0 field and the
738          * RMOR register no longer exist
739          */
740         return true;
741 
742     default:
743         return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
744     }
745 }
746 
747 static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
748 {
749     CPUPPCState *env = &POWERPC_CPU(cs)->env;
750     bool vpm;
751 
752     if (msr_ir) {
753         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
754     } else {
755         vpm = ppc_hash64_use_vrma(env);
756     }
757     if (vpm && !msr_hv) {
758         cs->exception_index = POWERPC_EXCP_HISI;
759     } else {
760         cs->exception_index = POWERPC_EXCP_ISI;
761     }
762     env->error_code = error_code;
763 }
764 
765 static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
766 {
767     CPUPPCState *env = &POWERPC_CPU(cs)->env;
768     bool vpm;
769 
770     if (msr_dr) {
771         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
772     } else {
773         vpm = ppc_hash64_use_vrma(env);
774     }
775     if (vpm && !msr_hv) {
776         cs->exception_index = POWERPC_EXCP_HDSI;
777         env->spr[SPR_HDAR] = dar;
778         env->spr[SPR_HDSISR] = dsisr;
779     } else {
780         cs->exception_index = POWERPC_EXCP_DSI;
781         env->spr[SPR_DAR] = dar;
782         env->spr[SPR_DSISR] = dsisr;
783     }
784     env->error_code = 0;
785 }
786 
787 
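/*
 * Set the Referenced bit of the HPTE at @ptex, either through the
 * virtual hypervisor's hpte_set_r hook or with a single byte store
 * into the (big-endian) second doubleword of the in-memory entry.
 * ppc_hash64_set_c() below does the same for the Changed bit.
 */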
788 static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
789 {
790     hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 14;
791 
792     if (cpu->vhyp) {
793         PPCVirtualHypervisorClass *vhc =
794             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
795         vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
796         return;
797     }
798     base = ppc_hash64_hpt_base(cpu);
799 
800 
801     /* The HW performs a non-atomic byte update */
802     stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
803 }
804 
805 static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
806 {
807     hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;
808 
809     if (cpu->vhyp) {
810         PPCVirtualHypervisorClass *vhc =
811             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
812         vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
813         return;
814     }
815     base = ppc_hash64_hpt_base(cpu);
816 
817     /* The HW performs a non-atomic byte update */
818     stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
819 }
820 
821 static target_ulong rmls_limit(PowerPCCPU *cpu)
822 {
823     CPUPPCState *env = &cpu->env;
824     /*
825      * In theory the meanings of RMLS values are implementation
826      * dependent.  In practice, this seems to have been the set from
827      * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
828      *
829      * Unsupported values mean the OS has shot itself in the
830      * foot. Return a 0-sized RMA in this case, which we expect
831      * to trigger an immediate DSI or ISI
832      */
833     static const target_ulong rma_sizes[16] = {
834         [0] = 256 * GiB,
835         [1] = 16 * GiB,
836         [2] = 1 * GiB,
837         [3] = 64 * MiB,
838         [4] = 256 * MiB,
839         [7] = 128 * MiB,
840         [8] = 32 * MiB,
841     };
842     target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;
843 
844     return rma_sizes[rmls];
845 }
846 
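/*
 * Build a dummy SLB entry describing the VRMA (Virtual Real Mode Area)
 * from LPCR[VRMASD], so that real-mode accesses can reuse the normal
 * segment-based translation path.  Returns -1 if the VRMASD page size
 * encoding is not supported by this CPU.
 */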
847 static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
848 {
849     CPUPPCState *env = &cpu->env;
850     target_ulong lpcr = env->spr[SPR_LPCR];
851     uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
852     target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
853     int i;
854 
855     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
856         const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
857 
858         if (!sps->page_shift) {
859             break;
860         }
861 
862         if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
863             slb->esid = SLB_ESID_V;
864             slb->vsid = vsid;
865             slb->sps = sps;
866             return 0;
867         }
868     }
869 
870     error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
871                  TARGET_FMT_lx, lpcr);
872 
873     return -1;
874 }
875 
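/*
 * Top-level hash MMU translation.  Handles real-mode accesses (virtual
 * hypervisor, HRMOR, VRMA or RMLS-limited RMA), then performs the SLB
 * lookup, hash table search, permission checks and R/C bit updates
 * before installing the translation in QEMU's TLB.  Returns 0 on
 * success, or 1 after raising the appropriate exception state.
 */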
876 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
877                                 int rwx, int mmu_idx)
878 {
879     CPUState *cs = CPU(cpu);
880     CPUPPCState *env = &cpu->env;
881     ppc_slb_t vrma_slbe;
882     ppc_slb_t *slb;
883     unsigned apshift;
884     hwaddr ptex;
885     ppc_hash_pte64_t pte;
886     int exec_prot, pp_prot, amr_prot, prot;
887     MMUAccessType access_type;
888     int need_prot;
889     hwaddr raddr;
890 
891     assert((rwx == 0) || (rwx == 1) || (rwx == 2));
892     access_type = rwx;
893 
894     /*
895      * Note on LPCR usage: 970 uses HID4, but our special variant of
896      * store_spr copies relevant fields into env->spr[SPR_LPCR].
897      * Similarly we filter unimplemented bits when storing into LPCR
898      * depending on the MMU version. This code can thus just use the
899      * LPCR "as-is".
900      */
901 
902     /* 1. Handle real mode accesses */
903     if (access_type == MMU_INST_FETCH ? !msr_ir : !msr_dr) {
904         /*
905          * Translation is supposedly "off", but in real mode the top 4
906          * effective address bits are (mostly) ignored
907          */
908         raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
909 
910         if (cpu->vhyp) {
911             /*
912              * In virtual hypervisor mode, there's nothing to do:
913              *   EA == GPA == qemu guest address
914              */
915         } else if (msr_hv || !env->has_hv_mode) {
916             /* In HV mode, add HRMOR if top EA bit is clear */
917             if (!(eaddr >> 63)) {
918                 raddr |= env->spr[SPR_HRMOR];
919             }
920         } else if (ppc_hash64_use_vrma(env)) {
921             /* Emulated VRMA mode */
922             slb = &vrma_slbe;
923             if (build_vrma_slbe(cpu, slb) != 0) {
924                 /* Invalid VRMA setup, machine check */
925                 cs->exception_index = POWERPC_EXCP_MCHECK;
926                 env->error_code = 0;
927                 return 1;
928             }
929 
930             goto skip_slb_search;
931         } else {
932             target_ulong limit = rmls_limit(cpu);
933 
934             /* Emulated old-style RMO mode, bounds check against RMLS */
935             if (raddr >= limit) {
936                 switch (access_type) {
937                 case MMU_INST_FETCH:
938                     ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
939                     break;
940                 case MMU_DATA_LOAD:
941                     ppc_hash64_set_dsi(cs, eaddr, DSISR_PROTFAULT);
942                     break;
943                 case MMU_DATA_STORE:
944                     ppc_hash64_set_dsi(cs, eaddr,
945                                        DSISR_PROTFAULT | DSISR_ISSTORE);
946                     break;
947                 default:
948                     g_assert_not_reached();
949                 }
950                 return 1;
951             }
952 
953             raddr |= env->spr[SPR_RMOR];
954         }
955         tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
956                      PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
957                      TARGET_PAGE_SIZE);
958         return 0;
959     }
960 
961     /* 2. Translation is on, so look up the SLB */
962     slb = slb_lookup(cpu, eaddr);
963     if (!slb) {
964         /* No entry found, check if in-memory segment tables are in use */
965         if (ppc64_use_proc_tbl(cpu)) {
966             /* TODO - Unsupported */
967             error_report("Segment Table Support Unimplemented");
968             exit(1);
969         }
970         /* Segment still not found, generate the appropriate interrupt */
971         switch (access_type) {
972         case MMU_INST_FETCH:
973             cs->exception_index = POWERPC_EXCP_ISEG;
974             env->error_code = 0;
975             break;
976         case MMU_DATA_LOAD:
977         case MMU_DATA_STORE:
978             cs->exception_index = POWERPC_EXCP_DSEG;
979             env->error_code = 0;
980             env->spr[SPR_DAR] = eaddr;
981             break;
982         default:
983             g_assert_not_reached();
984         }
985         return 1;
986     }
987 
988 skip_slb_search:
989 
990     /* 3. Check for segment level no-execute violation */
991     if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
992         ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD);
993         return 1;
994     }
995 
996     /* 4. Locate the PTE in the hash table */
997     ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
998     if (ptex == -1) {
999         switch (access_type) {
1000         case MMU_INST_FETCH:
1001             ppc_hash64_set_isi(cs, SRR1_NOPTE);
1002             break;
1003         case MMU_DATA_LOAD:
1004             ppc_hash64_set_dsi(cs, eaddr, DSISR_NOPTE);
1005             break;
1006         case MMU_DATA_STORE:
1007             ppc_hash64_set_dsi(cs, eaddr, DSISR_NOPTE | DSISR_ISSTORE);
1008             break;
1009         default:
1010             g_assert_not_reached();
1011         }
1012         return 1;
1013     }
1014     qemu_log_mask(CPU_LOG_MMU,
1015                   "found PTE at index %08" HWADDR_PRIx "\n", ptex);
1016 
1017     /* 5. Check access permissions */
1018 
1019     exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
1020     pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
1021     amr_prot = ppc_hash64_amr_prot(cpu, pte);
1022     prot = exec_prot & pp_prot & amr_prot;
1023 
1024     need_prot = prot_for_access_type(access_type);
1025     if (need_prot & ~prot) {
1026         /* Access right violation */
1027         qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
1028         if (access_type == MMU_INST_FETCH) {
1029             int srr1 = 0;
1030             if (PAGE_EXEC & ~exec_prot) {
1031                 srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
1032             } else if (PAGE_EXEC & ~pp_prot) {
1033                 srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
1034             }
1035             if (PAGE_EXEC & ~amr_prot) {
1036                 srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
1037             }
1038             ppc_hash64_set_isi(cs, srr1);
1039         } else {
1040             int dsisr = 0;
1041             if (need_prot & ~pp_prot) {
1042                 dsisr |= DSISR_PROTFAULT;
1043             }
1044             if (access_type == MMU_DATA_STORE) {
1045                 dsisr |= DSISR_ISSTORE;
1046             }
1047             if (need_prot & ~amr_prot) {
1048                 dsisr |= DSISR_AMR;
1049             }
1050             ppc_hash64_set_dsi(cs, eaddr, dsisr);
1051         }
1052         return 1;
1053     }
1054 
1055     qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");
1056 
1057     /* 6. Update PTE referenced and changed bits if necessary */
1058 
1059     if (!(pte.pte1 & HPTE64_R_R)) {
1060         ppc_hash64_set_r(cpu, ptex, pte.pte1);
1061     }
1062     if (!(pte.pte1 & HPTE64_R_C)) {
1063         if (access_type == MMU_DATA_STORE) {
1064             ppc_hash64_set_c(cpu, ptex, pte.pte1);
1065         } else {
1066             /*
1067              * Treat the page as read-only for now, so that a later write
1068              * will pass through this function again to set the C bit
1069              */
1070             prot &= ~PAGE_WRITE;
1071         }
1072     }
1073 
1074     /* 7. Determine the real address from the PTE */
1075 
1076     raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
1077 
1078     tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
1079                  prot, mmu_idx, 1ULL << apshift);
1080 
1081     return 0;
1082 }
1083 
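/*
 * Debug translation (e.g. for the gdb stub or the monitor): the same
 * walk as the fault handler above, but it raises no exceptions and
 * does not update the R/C bits; returns -1 if the address does not
 * translate.
 */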
1084 hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
1085 {
1086     CPUPPCState *env = &cpu->env;
1087     ppc_slb_t vrma_slbe;
1088     ppc_slb_t *slb;
1089     hwaddr ptex, raddr;
1090     ppc_hash_pte64_t pte;
1091     unsigned apshift;
1092 
1093     /* Handle real mode */
1094     if (msr_dr == 0) {
1095         /* In real mode the top 4 effective address bits are ignored */
1096         raddr = addr & 0x0FFFFFFFFFFFFFFFULL;
1097 
1098         if (cpu->vhyp) {
1099             /*
1100              * In virtual hypervisor mode, there's nothing to do:
1101              *   EA == GPA == qemu guest address
1102              */
1103             return raddr;
1104         } else if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
1105             /* In HV mode, add HRMOR if top EA bit is clear */
1106             return raddr | env->spr[SPR_HRMOR];
1107         } else if (ppc_hash64_use_vrma(env)) {
1108             /* Emulated VRMA mode */
1109             slb = &vrma_slbe;
1110             if (build_vrma_slbe(cpu, slb) != 0) {
1111                 return -1;
1112             }
1113         } else {
1114             target_ulong limit = rmls_limit(cpu);
1115 
1116             /* Emulated old-style RMO mode, bounds check against RMLS */
1117             if (raddr >= limit) {
1118                 return -1;
1119             }
1120             return raddr | env->spr[SPR_RMOR];
1121         }
1122     } else {
1123         slb = slb_lookup(cpu, addr);
1124         if (!slb) {
1125             return -1;
1126         }
1127     }
1128 
1129     ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
1130     if (ptex == -1) {
1131         return -1;
1132     }
1133 
1134     return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
1135         & TARGET_PAGE_MASK;
1136 }
1137 
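/*
 * A HPTE has been modified or removed: drop any translations that may
 * have been cached from it.
 */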
1138 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
1139                                target_ulong pte0, target_ulong pte1)
1140 {
1141     /*
1142      * XXX: given that there are too many segments to invalidate
1143      * individually, and we still don't have a tlb_flush_mask(env, n,
1144      * mask) in QEMU, we just invalidate all TLBs
1145      */
1146     cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
1147 }
1148 
1149 #ifdef CONFIG_TCG
1150 void helper_store_lpcr(CPUPPCState *env, target_ulong val)
1151 {
1152     PowerPCCPU *cpu = env_archcpu(env);
1153 
1154     ppc_store_lpcr(cpu, val);
1155 }
1156 #endif
1157 
1158 void ppc_hash64_init(PowerPCCPU *cpu)
1159 {
1160     CPUPPCState *env = &cpu->env;
1161     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
1162 
1163     if (!pcc->hash64_opts) {
1164         assert(!mmu_is_64bit(env->mmu_model));
1165         return;
1166     }
1167 
1168     cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
1169 }
1170 
1171 void ppc_hash64_finalize(PowerPCCPU *cpu)
1172 {
1173     g_free(cpu->hash64_opts);
1174 }
1175 
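/*
 * Hash MMU feature tables: ppc_hash64_opts_basic advertises only 4K
 * and 16M pages, while the POWER7 variant below adds 1T segments, AMR
 * protection, CI large pages and the 64K/16G page sizes.
 */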
1176 const PPCHash64Options ppc_hash64_opts_basic = {
1177     .flags = 0,
1178     .slb_size = 64,
1179     .sps = {
1180         { .page_shift = 12, /* 4K */
1181           .slb_enc = 0,
1182           .enc = { { .page_shift = 12, .pte_enc = 0 } }
1183         },
1184         { .page_shift = 24, /* 16M */
1185           .slb_enc = 0x100,
1186           .enc = { { .page_shift = 24, .pte_enc = 0 } }
1187         },
1188     },
1189 };
1190 
1191 const PPCHash64Options ppc_hash64_opts_POWER7 = {
1192     .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
1193     .slb_size = 32,
1194     .sps = {
1195         {
1196             .page_shift = 12, /* 4K */
1197             .slb_enc = 0,
1198             .enc = { { .page_shift = 12, .pte_enc = 0 },
1199                      { .page_shift = 16, .pte_enc = 0x7 },
1200                      { .page_shift = 24, .pte_enc = 0x38 }, },
1201         },
1202         {
1203             .page_shift = 16, /* 64K */
1204             .slb_enc = SLB_VSID_64K,
1205             .enc = { { .page_shift = 16, .pte_enc = 0x1 },
1206                      { .page_shift = 24, .pte_enc = 0x8 }, },
1207         },
1208         {
1209             .page_shift = 24, /* 16M */
1210             .slb_enc = SLB_VSID_16M,
1211             .enc = { { .page_shift = 24, .pte_enc = 0 }, },
1212         },
1213         {
1214             .page_shift = 34, /* 16G */
1215             .slb_enc = SLB_VSID_16G,
1216             .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
1217         },
1218     }
1219 };
1220 