xref: /qemu/target/ppc/mmu-hash64.c (revision 40fed8c1)
1 /*
2  *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *  Copyright (c) 2013 David Gibson, IBM Corporation
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/page-protection.h"
25 #include "qemu/error-report.h"
26 #include "qemu/qemu-print.h"
27 #include "sysemu/hw_accel.h"
28 #include "kvm_ppc.h"
29 #include "mmu-hash64.h"
30 #include "exec/log.h"
31 #include "hw/hw.h"
32 #include "internal.h"
33 #include "mmu-book3s-v3.h"
34 #include "helper_regs.h"
35 
36 #ifdef CONFIG_TCG
37 #include "exec/helper-proto.h"
38 #endif
39 
40 /* #define DEBUG_SLB */
41 
42 #ifdef DEBUG_SLB
43 #  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
44 #else
45 #  define LOG_SLB(...) do { } while (0)
46 #endif
47 
48 /*
49  * SLB handling
50  */
51 
52 static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
53 {
54     CPUPPCState *env = &cpu->env;
55     uint64_t esid_256M, esid_1T;
56     int n;
57 
58     LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
59 
60     esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
61     esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
62 
63     for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
64         ppc_slb_t *slb = &env->slb[n];
65 
66         LOG_SLB("%s: slot %d %016" PRIx64 " %016"
67                     PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
68         /*
69          * We check for 1T matches on all MMUs here - if the MMU
70          * doesn't have 1T segment support, we will have prevented 1T
71          * entries from being inserted in the slbmte code.
72          */
73         if (((slb->esid == esid_256M) &&
74              ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
75             || ((slb->esid == esid_1T) &&
76                 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
77             return slb;
78         }
79     }
80 
81     return NULL;
82 }
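
/*
 * Added illustrative sketch (not part of the original file): the compare
 * tag slb_lookup() builds inline for each segment size, factored into a
 * hypothetical helper.  SEGMENT_MASK_256M, SEGMENT_MASK_1T and SLB_ESID_V
 * come from mmu-hash64.h.
 */
static inline uint64_t example_slb_compare_tag(target_ulong eaddr, bool is_1T)
{
    /* Keep the ESID bits for the chosen segment size and set the valid bit */
    return (eaddr & (is_1T ? SEGMENT_MASK_1T : SEGMENT_MASK_256M))
           | SLB_ESID_V;
}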
83 
84 void dump_slb(PowerPCCPU *cpu)
85 {
86     CPUPPCState *env = &cpu->env;
87     int i;
88     uint64_t slbe, slbv;
89 
90     cpu_synchronize_state(CPU(cpu));
91 
92     qemu_printf("SLB\tESID\t\t\tVSID\n");
93     for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
94         slbe = env->slb[i].esid;
95         slbv = env->slb[i].vsid;
96         if (slbe == 0 && slbv == 0) {
97             continue;
98         }
99         qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
100                     i, slbe, slbv);
101     }
102 }
103 
104 #ifdef CONFIG_TCG
105 void helper_SLBIA(CPUPPCState *env, uint32_t ih)
106 {
107     PowerPCCPU *cpu = env_archcpu(env);
108     int starting_entry;
109     int n;
110 
111     /*
112      * slbia must always flush all TLB (which is equivalent to ERAT in ppc
113      * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
114      * can overwrite a valid SLB without flushing its lookaside information.
115      *
116      * It would be possible to keep the TLB in sync with the SLB by flushing
117      * when a valid entry is overwritten by slbmte, and therefore slbia would
118      * not have to flush unless it evicts a valid SLB entry. However it is
119      * expected that slbmte is more common than slbia, and slbia is usually
120      * going to evict valid SLB entries, so that tradeoff is unlikely to be a
121      * good one.
122      *
123      * ISA v2.05 introduced IH field with values 0,1,2,6. These all invalidate
124      * the same SLB entries (everything but entry 0), but differ in what
125      * "lookaside information" is invalidated. TCG can ignore this and flush
126      * everything.
127      *
128      * ISA v3.0 introduced additional values 3,4,7, which change what SLBs are
129      * invalidated.
130      */
131 
132     env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
133 
134     starting_entry = 1; /* default for IH=0,1,2,6 */
135 
136     if (env->mmu_model == POWERPC_MMU_3_00) {
137         switch (ih) {
138         case 0x7:
139             /* invalidate no SLBs, but all lookaside information */
140             return;
141 
142         case 0x3:
143         case 0x4:
144             /* also considers SLB entry 0 */
145             starting_entry = 0;
146             break;
147 
148         case 0x5:
149             /* treat undefined values as ih==0, and warn */
150             qemu_log_mask(LOG_GUEST_ERROR,
151                           "slbia undefined IH field %u.\n", ih);
152             break;
153 
154         default:
155             /* 0,1,2,6 */
156             break;
157         }
158     }
159 
160     for (n = starting_entry; n < cpu->hash64_opts->slb_size; n++) {
161         ppc_slb_t *slb = &env->slb[n];
162 
163         if (!(slb->esid & SLB_ESID_V)) {
164             continue;
165         }
166         if (env->mmu_model == POWERPC_MMU_3_00) {
167             if (ih == 0x3 && (slb->vsid & SLB_VSID_C) == 0) {
168                 /* preserves entries with a class value of 0 */
169                 continue;
170             }
171         }
172 
173         slb->esid &= ~SLB_ESID_V;
174     }
175 }
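
/*
 * Added summary (not in the original file) of the IH handling above on an
 * ISA v3.0 MMU: IH = 0, 1, 2, 6 invalidate entries 1..slb_size-1; IH = 4
 * additionally invalidates entry 0; IH = 3 invalidates only class-1
 * entries (SLB_VSID_C set), including entry 0; IH = 7 leaves all SLB
 * entries alone; undefined values are treated as IH = 0 and logged.  In
 * every case the helper has already requested a full local TLB flush.
 */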
176 
177 #if defined(TARGET_PPC64)
178 void helper_SLBIAG(CPUPPCState *env, target_ulong rs, uint32_t l)
179 {
180     PowerPCCPU *cpu = env_archcpu(env);
181     int n;
182 
183     /*
184      * slbiag must always flush all TLB (which is equivalent to ERAT in ppc
185      * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
186      * can overwrite a valid SLB without flushing its lookaside information.
187      *
188      * It would be possible to keep the TLB in sync with the SLB by flushing
189      * when a valid entry is overwritten by slbmte, and therefore slbiag would
190      * not have to flush unless it evicts a valid SLB entry. However it is
191      * expected that slbmte is more common than slbiag, and slbiag is usually
192      * going to evict valid SLB entries, so that tradeoff is unlikely to be a
193      * good one.
194      */
195     env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
196 
197     for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
198         ppc_slb_t *slb = &env->slb[n];
199         slb->esid &= ~SLB_ESID_V;
200     }
201 }
202 #endif
203 
204 static void __helper_slbie(CPUPPCState *env, target_ulong addr,
205                            target_ulong global)
206 {
207     PowerPCCPU *cpu = env_archcpu(env);
208     ppc_slb_t *slb;
209 
210     slb = slb_lookup(cpu, addr);
211     if (!slb) {
212         return;
213     }
214 
215     if (slb->esid & SLB_ESID_V) {
216         slb->esid &= ~SLB_ESID_V;
217 
218         /*
219          * XXX: given the fact that segment size is 256 MB or 1TB,
220          *      and we still don't have a tlb_flush_mask(env, n, mask)
221          *      in QEMU, we just invalidate all TLBs
222          */
223         env->tlb_need_flush |=
224             (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
225     }
226 }
227 
228 void helper_SLBIE(CPUPPCState *env, target_ulong addr)
229 {
230     __helper_slbie(env, addr, false);
231 }
232 
233 void helper_SLBIEG(CPUPPCState *env, target_ulong addr)
234 {
235     __helper_slbie(env, addr, true);
236 }
237 #endif
238 
239 int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
240                   target_ulong esid, target_ulong vsid)
241 {
242     CPUPPCState *env = &cpu->env;
243     ppc_slb_t *slb = &env->slb[slot];
244     const PPCHash64SegmentPageSizes *sps = NULL;
245     int i;
246 
247     if (slot >= cpu->hash64_opts->slb_size) {
248         return -1; /* Bad slot number */
249     }
250     if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
251         return -1; /* Reserved bits set */
252     }
253     if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
254         return -1; /* Bad segment size */
255     }
256     if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
257         return -1; /* 1T segment on MMU that doesn't support it */
258     }
259 
260     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
261         const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
262 
263         if (!sps1->page_shift) {
264             break;
265         }
266 
267         if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
268             sps = sps1;
269             break;
270         }
271     }
272 
273     if (!sps) {
274         error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
275                      " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
276                      slot, esid, vsid);
277         return -1;
278     }
279 
280     slb->esid = esid;
281     slb->vsid = vsid;
282     slb->sps = sps;
283 
284     LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
285             " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
286             slb->esid, slb->vsid);
287 
288     return 0;
289 }
290 
291 #ifdef CONFIG_TCG
292 static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
293                              target_ulong *rt)
294 {
295     CPUPPCState *env = &cpu->env;
296     int slot = rb & 0xfff;
297     ppc_slb_t *slb = &env->slb[slot];
298 
299     if (slot >= cpu->hash64_opts->slb_size) {
300         return -1;
301     }
302 
303     *rt = slb->esid;
304     return 0;
305 }
306 
307 static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
308                              target_ulong *rt)
309 {
310     CPUPPCState *env = &cpu->env;
311     int slot = rb & 0xfff;
312     ppc_slb_t *slb = &env->slb[slot];
313 
314     if (slot >= cpu->hash64_opts->slb_size) {
315         return -1;
316     }
317 
318     *rt = slb->vsid;
319     return 0;
320 }
321 
322 static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
323                              target_ulong *rt)
324 {
325     CPUPPCState *env = &cpu->env;
326     ppc_slb_t *slb;
327 
328     if (!msr_is_64bit(env, env->msr)) {
329         rb &= 0xffffffff;
330     }
331     slb = slb_lookup(cpu, rb);
332     if (slb == NULL) {
333         *rt = (target_ulong)-1ul;
334     } else {
335         *rt = slb->vsid;
336     }
337     return 0;
338 }
339 
340 void helper_SLBMTE(CPUPPCState *env, target_ulong rb, target_ulong rs)
341 {
342     PowerPCCPU *cpu = env_archcpu(env);
343 
344     if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
345         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
346                                POWERPC_EXCP_INVAL, GETPC());
347     }
348 }
349 
350 target_ulong helper_SLBMFEE(CPUPPCState *env, target_ulong rb)
351 {
352     PowerPCCPU *cpu = env_archcpu(env);
353     target_ulong rt = 0;
354 
355     if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
356         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
357                                POWERPC_EXCP_INVAL, GETPC());
358     }
359     return rt;
360 }
361 
362 target_ulong helper_SLBFEE(CPUPPCState *env, target_ulong rb)
363 {
364     PowerPCCPU *cpu = env_archcpu(env);
365     target_ulong rt = 0;
366 
367     if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
368         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
369                                POWERPC_EXCP_INVAL, GETPC());
370     }
371     return rt;
372 }
373 
374 target_ulong helper_SLBMFEV(CPUPPCState *env, target_ulong rb)
375 {
376     PowerPCCPU *cpu = env_archcpu(env);
377     target_ulong rt = 0;
378 
379     if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
380         raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
381                                POWERPC_EXCP_INVAL, GETPC());
382     }
383     return rt;
384 }
385 #endif
386 
387 /* Check No-Execute or Guarded Storage */
388 static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
389                                               ppc_hash_pte64_t pte)
390 {
391     /* Exec permissions CANNOT take away read or write permissions */
392     return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
393             PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
394 }
395 
396 /* Check Basic Storage Protection */
397 static int ppc_hash64_pte_prot(int mmu_idx,
398                                ppc_slb_t *slb, ppc_hash_pte64_t pte)
399 {
400     unsigned pp, key;
401     /*
402      * Some pp bit combinations have undefined behaviour, so default
403      * to no access in those cases
404      */
405     int prot = 0;
406 
407     key = !!(mmuidx_pr(mmu_idx) ? (slb->vsid & SLB_VSID_KP)
408              : (slb->vsid & SLB_VSID_KS));
409     pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
410 
411     if (key == 0) {
412         switch (pp) {
413         case 0x0:
414         case 0x1:
415         case 0x2:
416             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
417             break;
418 
419         case 0x3:
420         case 0x6:
421             prot = PAGE_READ | PAGE_EXEC;
422             break;
423         }
424     } else {
425         switch (pp) {
426         case 0x0:
427         case 0x6:
428             break;
429 
430         case 0x1:
431         case 0x3:
432             prot = PAGE_READ | PAGE_EXEC;
433             break;
434 
435         case 0x2:
436             prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
437             break;
438         }
439     }
440 
441     return prot;
442 }
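
/*
 * Added summary (not in the original file) of the mapping implemented
 * above, where "key" is taken from the SLBE: Kp in problem state
 * (MSR[PR] = 1), Ks otherwise:
 *
 *   key  pp       access granted
 *    0   0,1,2    read/write/execute
 *    0   3,6      read/execute
 *    1   0,6      no access
 *    1   1,3      read/execute
 *    1   2        read/write/execute
 *
 * Any other pp encoding falls through with prot == 0 (no access).
 */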
443 
444 /* Check the instruction access permissions specified in the IAMR */
445 static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
446 {
447     CPUPPCState *env = &cpu->env;
448     int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;
449 
450     /*
451      * An instruction fetch is permitted if the IAMR bit is 0.
452      * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
453      * can only take away EXEC permissions, not READ or WRITE permissions.
454      * If the bit is cleared, return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
455      * EXEC permissions are allowed.
456      */
457     return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
458                                PAGE_READ | PAGE_WRITE | PAGE_EXEC;
459 }
460 
461 static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
462 {
463     CPUPPCState *env = &cpu->env;
464     int key, amrbits;
465     int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
466 
467     /* Only recent MMUs implement Virtual Page Class Key Protection */
468     if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
469         return prot;
470     }
471 
472     key = HPTE64_R_KEY(pte.pte1);
473     amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;
474 
475     /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
476     /*         env->spr[SPR_AMR]); */
477 
478     /*
479      * A store is permitted if the AMR bit is 0. Remove write
480      * protection if it is set.
481      */
482     if (amrbits & 0x2) {
483         prot &= ~PAGE_WRITE;
484     }
485     /*
486      * A load is permitted if the AMR bit is 0. Remove read
487      * protection if it is set.
488      */
489     if (amrbits & 0x1) {
490         prot &= ~PAGE_READ;
491     }
492 
493     switch (env->mmu_model) {
494     /*
495      * MMU version 2.07 and later support IAMR
496      * Check whether the IAMR allows the instruction access: the helper
497      * returns a mask without PAGE_EXEC when execution is denied for this
498      * key, so the AND below clears EXEC in that case and is a no-op otherwise.
499      */
500     case POWERPC_MMU_2_07:
501     case POWERPC_MMU_3_00:
502         prot &= ppc_hash64_iamr_prot(cpu, key);
503         break;
504     default:
505         break;
506     }
507 
508     return prot;
509 }
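
/*
 * Added illustrative sketch (not part of the original file): both
 * ppc_hash64_iamr_prot() and ppc_hash64_amr_prot() above extract the same
 * 2-bit field for a given virtual page class key; a hypothetical helper
 * spelling out that extraction:
 */
static inline int example_amr_key_bits(uint64_t amr, int key)
{
    /* Key 0 occupies the two most significant bits, key 31 the lowest two */
    return (amr >> (2 * (31 - key))) & 0x3;
}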
510 
511 const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
512                                              hwaddr ptex, int n)
513 {
514     hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
515     hwaddr base;
516     hwaddr plen = n * HASH_PTE_SIZE_64;
517     const ppc_hash_pte64_t *hptes;
518 
519     if (cpu->vhyp) {
520         PPCVirtualHypervisorClass *vhc =
521             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
522         return vhc->map_hptes(cpu->vhyp, ptex, n);
523     }
524     base = ppc_hash64_hpt_base(cpu);
525 
526     if (!base) {
527         return NULL;
528     }
529 
530     hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
531                               MEMTXATTRS_UNSPECIFIED);
532     if (plen < (n * HASH_PTE_SIZE_64)) {
533         hw_error("%s: Unable to map all requested HPTEs\n", __func__);
534     }
535     return hptes;
536 }
537 
538 void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
539                             hwaddr ptex, int n)
540 {
541     if (cpu->vhyp) {
542         PPCVirtualHypervisorClass *vhc =
543             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
544         vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
545         return;
546     }
547 
548     address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
549                         false, n * HASH_PTE_SIZE_64);
550 }
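
/*
 * Added illustrative sketch (not part of the original file): fetching a
 * single HPTE through the map/unmap API above.  The ppc_hash64_hpte0/1
 * accessors are provided by mmu-hash64.h.
 */
static inline bool example_read_hpte(PowerPCCPU *cpu, hwaddr ptex,
                                     ppc_hash_pte64_t *pte)
{
    const ppc_hash_pte64_t *hptes = ppc_hash64_map_hptes(cpu, ptex, 1);

    if (!hptes) {
        return false;
    }
    pte->pte0 = ppc_hash64_hpte0(cpu, hptes, 0);
    pte->pte1 = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
    return true;
}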
551 
552 static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
553                                 uint64_t pte0, uint64_t pte1)
554 {
555     int i;
556 
557     if (!(pte0 & HPTE64_V_LARGE)) {
558         if (sps->page_shift != 12) {
559             /* 4kiB page in a non 4kiB segment */
560             return 0;
561         }
562         /* Normal 4kiB page */
563         return 12;
564     }
565 
566     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
567         const PPCHash64PageSize *ps = &sps->enc[i];
568         uint64_t mask;
569 
570         if (!ps->page_shift) {
571             break;
572         }
573 
574         if (ps->page_shift == 12) {
575             /* L bit is set so this can't be a 4kiB page */
576             continue;
577         }
578 
579         mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
580 
581         if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
582             return ps->page_shift;
583         }
584     }
585 
586     return 0; /* Bad page size encoding */
587 }
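
/*
 * Added note (not in the original file): when HPTE64_V_LARGE is set, the
 * actual page size is encoded in the low-order bits of the pte1 RPN field
 * (the bits below the page-size boundary).  hpte_page_shift() compares
 * those bits against each pte_enc of the segment's enc[] table, which is
 * how multiple page sizes per segment (MPSS) are resolved; e.g. with the
 * POWER7 options at the end of this file, a 64K segment may contain 64K
 * (pte_enc 0x1) or 16M (pte_enc 0x8) pages.
 */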
588 
589 static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
590 {
591     /* Insert B into pte0 */
592     *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
593             ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
594              (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));
595 
596     /* Remove B from pte1 */
597     *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
598 }
599 
600 
601 static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
602                                      const PPCHash64SegmentPageSizes *sps,
603                                      target_ulong ptem,
604                                      ppc_hash_pte64_t *pte, unsigned *pshift)
605 {
606     int i;
607     const ppc_hash_pte64_t *pteg;
608     target_ulong pte0, pte1;
609     target_ulong ptex;
610 
611     ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
612     pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
613     if (!pteg) {
614         return -1;
615     }
616     for (i = 0; i < HPTES_PER_GROUP; i++) {
617         pte0 = ppc_hash64_hpte0(cpu, pteg, i);
618         /*
619          * pte0 contains the valid bit and must be read before pte1,
620          * otherwise we might see an old pte1 with a new valid bit and
621          * thus an inconsistent hpte value
622          */
623         smp_rmb();
624         pte1 = ppc_hash64_hpte1(cpu, pteg, i);
625 
626         /* Convert format if necessary */
627         if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
628             ppc64_v3_new_to_old_hpte(&pte0, &pte1);
629         }
630 
631         /* This compares V, B, H (secondary) and the AVPN */
632         if (HPTE64_V_COMPARE(pte0, ptem)) {
633             *pshift = hpte_page_shift(sps, pte0, pte1);
634             /*
635              * If there is no match, ignore the PTE, it could simply
636              * be for a different segment size encoding and the
637              * architecture specifies we should not match. Linux will
638              * potentially leave behind PTEs for the wrong base page
639              * size when demoting segments.
640              */
641             if (*pshift == 0) {
642                 continue;
643             }
644             /*
645              * We don't do anything with pshift yet as qemu TLB only
646              * deals with 4K pages anyway
647              */
648             pte->pte0 = pte0;
649             pte->pte1 = pte1;
650             ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
651             return ptex + i;
652         }
653     }
654     ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
655     /*
656      * We didn't find a valid entry.
657      */
658     return -1;
659 }
660 
661 static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
662                                      ppc_slb_t *slb, target_ulong eaddr,
663                                      ppc_hash_pte64_t *pte, unsigned *pshift)
664 {
665     CPUPPCState *env = &cpu->env;
666     hwaddr hash, ptex;
667     uint64_t vsid, epnmask, epn, ptem;
668     const PPCHash64SegmentPageSizes *sps = slb->sps;
669 
670     /*
671      * The SLB store path should prevent any bad page size encodings
672      * getting in there, so:
673      */
674     assert(sps);
675 
676     /* If ISL is set in LPCR we need to clamp the page size to 4K */
677     if (env->spr[SPR_LPCR] & LPCR_ISL) {
678         /* We assume that when using TCG, 4k is first entry of SPS */
679         sps = &cpu->hash64_opts->sps[0];
680         assert(sps->page_shift == 12);
681     }
682 
683     epnmask = ~((1ULL << sps->page_shift) - 1);
684 
685     if (slb->vsid & SLB_VSID_B) {
686         /* 1TB segment */
687         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
688         epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
689         hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
690     } else {
691         /* 256M segment */
692         vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
693         epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
694         hash = vsid ^ (epn >> sps->page_shift);
695     }
696     ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
697     ptem |= HPTE64_V_VALID;
698 
699     /* Page address translation */
700     qemu_log_mask(CPU_LOG_MMU,
701             "htab_base " HWADDR_FMT_plx " htab_mask " HWADDR_FMT_plx
702             " hash " HWADDR_FMT_plx "\n",
703             ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);
704 
705     /* Primary PTEG lookup */
706     qemu_log_mask(CPU_LOG_MMU,
707             "0 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
708             " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
709             " hash=" HWADDR_FMT_plx "\n",
710             ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
711             vsid, ptem,  hash);
712     ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
713 
714     if (ptex == -1) {
715         /* Secondary PTEG lookup */
716         ptem |= HPTE64_V_SECONDARY;
717         qemu_log_mask(CPU_LOG_MMU,
718                 "1 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
719                 " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
720                 " hash=" HWADDR_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
721                 ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);
722 
723         ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
724     }
725 
726     return ptex;
727 }
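
/*
 * Added illustrative sketch (not part of the original file): the primary
 * hash computed inline above for a 256M segment.  For 1T segments the
 * VSID is additionally folded in as vsid ^ (vsid << 25).  The secondary
 * PTEG uses the ones-complement of this value, and the PTEG base index is
 * (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP, as computed in
 * ppc_hash64_pteg_search().
 */
static inline hwaddr example_primary_hash_256M(uint64_t vsid, uint64_t epn,
                                               unsigned page_shift)
{
    return vsid ^ (epn >> page_shift);
}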
728 
729 unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
730                                           uint64_t pte0, uint64_t pte1)
731 {
732     int i;
733 
734     if (!(pte0 & HPTE64_V_LARGE)) {
735         return 12;
736     }
737 
738     /*
739      * The encodings in env->sps need to be carefully chosen so that
740      * this gives an unambiguous result.
741      */
742     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
743         const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
744         unsigned shift;
745 
746         if (!sps->page_shift) {
747             break;
748         }
749 
750         shift = hpte_page_shift(sps, pte0, pte1);
751         if (shift) {
752             return shift;
753         }
754     }
755 
756     return 0;
757 }
758 
759 static bool ppc_hash64_use_vrma(CPUPPCState *env)
760 {
761     switch (env->mmu_model) {
762     case POWERPC_MMU_3_00:
763         /*
764          * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
765          * register no longer exist
766          */
767         return true;
768 
769     default:
770         return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
771     }
772 }
773 
774 static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
775                                uint64_t error_code)
776 {
777     CPUPPCState *env = &POWERPC_CPU(cs)->env;
778     bool vpm;
779 
780     if (!mmuidx_real(mmu_idx)) {
781         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
782     } else {
783         vpm = ppc_hash64_use_vrma(env);
784     }
785     if (vpm && !mmuidx_hv(mmu_idx)) {
786         cs->exception_index = POWERPC_EXCP_HISI;
787         env->spr[SPR_ASDR] = slb_vsid;
788     } else {
789         cs->exception_index = POWERPC_EXCP_ISI;
790     }
791     env->error_code = error_code;
792 }
793 
794 static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
795                                uint64_t dar, uint64_t dsisr)
796 {
797     CPUPPCState *env = &POWERPC_CPU(cs)->env;
798     bool vpm;
799 
800     if (!mmuidx_real(mmu_idx)) {
801         vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
802     } else {
803         vpm = ppc_hash64_use_vrma(env);
804     }
805     if (vpm && !mmuidx_hv(mmu_idx)) {
806         cs->exception_index = POWERPC_EXCP_HDSI;
807         env->spr[SPR_HDAR] = dar;
808         env->spr[SPR_HDSISR] = dsisr;
809         env->spr[SPR_ASDR] = slb_vsid;
810     } else {
811         cs->exception_index = POWERPC_EXCP_DSI;
812         env->spr[SPR_DAR] = dar;
813         env->spr[SPR_DSISR] = dsisr;
814     }
815     env->error_code = 0;
816 }
817 
818 
819 static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
820 {
821     hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;
822 
823     if (cpu->vhyp) {
824         PPCVirtualHypervisorClass *vhc =
825             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
826         vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
827         return;
828     }
829     base = ppc_hash64_hpt_base(cpu);
830 
831 
832     /* The HW performs a non-atomic byte update */
833     stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
834 }
835 
836 static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
837 {
838     hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;
839 
840     if (cpu->vhyp) {
841         PPCVirtualHypervisorClass *vhc =
842             PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
843         vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
844         return;
845     }
846     base = ppc_hash64_hpt_base(cpu);
847 
848     /* The HW performs a non-atomic byte update */
849     stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
850 }
851 
852 static target_ulong rmls_limit(PowerPCCPU *cpu)
853 {
854     CPUPPCState *env = &cpu->env;
855     /*
856      * In theory the meanings of RMLS values are implementation
857      * dependent.  In practice, this seems to have been the set from
858      * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
859      *
860      * Unsupported values mean the OS has shot itself in the
861      * foot. Return a 0-sized RMA in this case, which we expect
862      * to trigger an immediate DSI or ISI
863      */
864     static const target_ulong rma_sizes[16] = {
865         [0] = 256 * GiB,
866         [1] = 16 * GiB,
867         [2] = 1 * GiB,
868         [3] = 64 * MiB,
869         [4] = 256 * MiB,
870         [7] = 128 * MiB,
871         [8] = 32 * MiB,
872     };
873     target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;
874 
875     return rma_sizes[rmls];
876 }
877 
878 /* Return the LLP in SLB_VSID format */
879 static uint64_t get_vrma_llp(PowerPCCPU *cpu)
880 {
881     CPUPPCState *env = &cpu->env;
882     uint64_t llp;
883 
884     if (env->mmu_model == POWERPC_MMU_3_00) {
885         ppc_v3_pate_t pate;
886         uint64_t ps, l, lp;
887 
888         /*
889          * ISA v3.0 removes the LPCR[VRMASD] field and puts the VRMA base
890          * page size (L||LP equivalent) in the PS field in the HPT partition
891          * table entry.
892          */
893         if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
894             error_report("Bad VRMA with no partition table entry");
895             return 0;
896         }
897         ps = PATE0_GET_PS(pate.dw0);
898         /* PS has L||LP in 3 consecutive bits, put them into SLB LLP format */
899         l = (ps >> 2) & 0x1;
900         lp = ps & 0x3;
901         llp = (l << SLB_VSID_L_SHIFT) | (lp << SLB_VSID_LP_SHIFT);
902 
903     } else {
904         uint64_t lpcr = env->spr[SPR_LPCR];
905         target_ulong vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
906 
907         /* VRMASD LLP matches SLB format, just shift and mask it */
908         llp = (vrmasd << SLB_VSID_LP_SHIFT) & SLB_VSID_LLP_MASK;
909     }
910 
911     return llp;
912 }
913 
914 static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
915 {
916     uint64_t llp = get_vrma_llp(cpu);
917     target_ulong vsid = SLB_VSID_VRMA | llp;
918     int i;
919 
920     for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
921         const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
922 
923         if (!sps->page_shift) {
924             break;
925         }
926 
927         if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
928             slb->esid = SLB_ESID_V;
929             slb->vsid = vsid;
930             slb->sps = sps;
931             return 0;
932         }
933     }
934 
935     error_report("Bad VRMA page size encoding 0x" TARGET_FMT_lx, llp);
936 
937     return -1;
938 }
939 
940 bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
941                       hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
942                       bool guest_visible)
943 {
944     CPUState *cs = CPU(cpu);
945     CPUPPCState *env = &cpu->env;
946     ppc_slb_t vrma_slbe;
947     ppc_slb_t *slb;
948     unsigned apshift;
949     hwaddr ptex;
950     ppc_hash_pte64_t pte;
951     int exec_prot, pp_prot, amr_prot, prot;
952     int need_prot;
953     hwaddr raddr;
954 
955     /*
956      * Note on LPCR usage: 970 uses HID4, but our special variant of
957      * store_spr copies relevant fields into env->spr[SPR_LPCR].
958      * Similarly we filter unimplemented bits when storing into LPCR
959      * depending on the MMU version. This code can thus just use the
960      * LPCR "as-is".
961      */
962 
963     /* 1. Handle real mode accesses */
964     if (mmuidx_real(mmu_idx)) {
965         /*
966          * Translation is supposedly "off", but in real mode the top 4
967          * effective address bits are (mostly) ignored
968          */
969         raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
970 
971         if (cpu->vhyp) {
972             /*
973              * In virtual hypervisor mode, there's nothing to do:
974              *   EA == GPA == qemu guest address
975              */
976         } else if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
977             /* In HV mode, add HRMOR if top EA bit is clear */
978             if (!(eaddr >> 63)) {
979                 raddr |= env->spr[SPR_HRMOR];
980             }
981         } else if (ppc_hash64_use_vrma(env)) {
982             /* Emulated VRMA mode */
983             slb = &vrma_slbe;
984             if (build_vrma_slbe(cpu, slb) != 0) {
985                 /* Invalid VRMA setup, machine check */
986                 if (guest_visible) {
987                     cs->exception_index = POWERPC_EXCP_MCHECK;
988                     env->error_code = 0;
989                 }
990                 return false;
991             }
992 
993             goto skip_slb_search;
994         } else {
995             target_ulong limit = rmls_limit(cpu);
996 
997             /* Emulated old-style RMO mode, bounds check against RMLS */
998             if (raddr >= limit) {
999                 if (!guest_visible) {
1000                     return false;
1001                 }
1002                 switch (access_type) {
1003                 case MMU_INST_FETCH:
1004                     ppc_hash64_set_isi(cs, mmu_idx, 0, SRR1_PROTFAULT);
1005                     break;
1006                 case MMU_DATA_LOAD:
1007                     ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr, DSISR_PROTFAULT);
1008                     break;
1009                 case MMU_DATA_STORE:
1010                     ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr,
1011                                        DSISR_PROTFAULT | DSISR_ISSTORE);
1012                     break;
1013                 default:
1014                     g_assert_not_reached();
1015                 }
1016                 return false;
1017             }
1018 
1019             raddr |= env->spr[SPR_RMOR];
1020         }
1021 
1022         *raddrp = raddr;
1023         *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1024         *psizep = TARGET_PAGE_BITS;
1025         return true;
1026     }
1027 
1028     /* 2. Translation is on, so look up the SLB */
1029     slb = slb_lookup(cpu, eaddr);
1030     if (!slb) {
1031         /* No entry found, check if in-memory segment tables are in use */
1032         if (ppc64_use_proc_tbl(cpu)) {
1033             /* TODO - Unsupported */
1034             error_report("Segment Table Support Unimplemented");
1035             exit(1);
1036         }
1037         /* Segment still not found, generate the appropriate interrupt */
1038         if (!guest_visible) {
1039             return false;
1040         }
1041         switch (access_type) {
1042         case MMU_INST_FETCH:
1043             cs->exception_index = POWERPC_EXCP_ISEG;
1044             env->error_code = 0;
1045             break;
1046         case MMU_DATA_LOAD:
1047         case MMU_DATA_STORE:
1048             cs->exception_index = POWERPC_EXCP_DSEG;
1049             env->error_code = 0;
1050             env->spr[SPR_DAR] = eaddr;
1051             break;
1052         default:
1053             g_assert_not_reached();
1054         }
1055         return false;
1056     }
1057 
1058  skip_slb_search:
1059 
1060     /* 3. Check for segment level no-execute violation */
1061     if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
1062         if (guest_visible) {
1063             ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOEXEC_GUARD);
1064         }
1065         return false;
1066     }
1067 
1068     /* 4. Locate the PTE in the hash table */
1069     ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
1070     if (ptex == -1) {
1071         if (!guest_visible) {
1072             return false;
1073         }
1074         switch (access_type) {
1075         case MMU_INST_FETCH:
1076             ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOPTE);
1077             break;
1078         case MMU_DATA_LOAD:
1079             ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, DSISR_NOPTE);
1080             break;
1081         case MMU_DATA_STORE:
1082             ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr,
1083                                DSISR_NOPTE | DSISR_ISSTORE);
1084             break;
1085         default:
1086             g_assert_not_reached();
1087         }
1088         return false;
1089     }
1090     qemu_log_mask(CPU_LOG_MMU,
1091                   "found PTE at index %08" HWADDR_PRIx "\n", ptex);
1092 
1093     /* 5. Check access permissions */
1094 
1095     exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
1096     pp_prot = ppc_hash64_pte_prot(mmu_idx, slb, pte);
1097     amr_prot = ppc_hash64_amr_prot(cpu, pte);
1098     prot = exec_prot & pp_prot & amr_prot;
1099 
1100     need_prot = prot_for_access_type(access_type);
1101     if (need_prot & ~prot) {
1102         /* Access right violation */
1103         qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
1104         if (!guest_visible) {
1105             return false;
1106         }
1107         if (access_type == MMU_INST_FETCH) {
1108             int srr1 = 0;
1109             if (PAGE_EXEC & ~exec_prot) {
1110                 srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
1111             } else if (PAGE_EXEC & ~pp_prot) {
1112                 srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
1113             }
1114             if (PAGE_EXEC & ~amr_prot) {
1115                 srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
1116             }
1117             ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, srr1);
1118         } else {
1119             int dsisr = 0;
1120             if (need_prot & ~pp_prot) {
1121                 dsisr |= DSISR_PROTFAULT;
1122             }
1123             if (access_type == MMU_DATA_STORE) {
1124                 dsisr |= DSISR_ISSTORE;
1125             }
1126             if (need_prot & ~amr_prot) {
1127                 dsisr |= DSISR_AMR;
1128             }
1129             ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, dsisr);
1130         }
1131         return false;
1132     }
1133 
1134     qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
1135 
1136     /* 6. Update PTE referenced and changed bits if necessary */
1137 
1138     if (!(pte.pte1 & HPTE64_R_R)) {
1139         ppc_hash64_set_r(cpu, ptex, pte.pte1);
1140     }
1141     if (!(pte.pte1 & HPTE64_R_C)) {
1142         if (access_type == MMU_DATA_STORE) {
1143             ppc_hash64_set_c(cpu, ptex, pte.pte1);
1144         } else {
1145             /*
1146              * Treat the page as read-only for now, so that a later write
1147              * will pass through this function again to set the C bit
1148              */
1149             prot &= ~PAGE_WRITE;
1150         }
1151     }
1152 
1153     /* 7. Determine the real address from the PTE */
1154 
1155     *raddrp = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
1156     *protp = prot;
1157     *psizep = apshift;
1158     return true;
1159 }
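
/*
 * Added illustrative sketch (not part of the original file): a minimal,
 * hypothetical caller of ppc_hash64_xlate() that resolves an effective
 * address for a data load without updating any guest-visible exception
 * state (guest_visible = false).  The real callers live in the generic
 * ppc xlate/TLB-fill paths.
 */
static inline bool example_hash64_resolve(PowerPCCPU *cpu, vaddr ea,
                                          int mmu_idx, hwaddr *ra)
{
    int page_shift, prot;

    return ppc_hash64_xlate(cpu, ea, MMU_DATA_LOAD, ra, &page_shift, &prot,
                            mmu_idx, false);
}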
1160 
1161 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
1162                                target_ulong pte0, target_ulong pte1)
1163 {
1164     /*
1165      * XXX: given the fact that there are too many segments to
1166      * invalidate, and we still don't have a tlb_flush_mask(env, n,
1167      * mask) in QEMU, we just invalidate all TLBs
1168      */
1169     cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
1170 }
1171 
1172 #ifdef CONFIG_TCG
1173 void helper_store_lpcr(CPUPPCState *env, target_ulong val)
1174 {
1175     PowerPCCPU *cpu = env_archcpu(env);
1176 
1177     ppc_store_lpcr(cpu, val);
1178 }
1179 #endif
1180 
1181 void ppc_hash64_init(PowerPCCPU *cpu)
1182 {
1183     CPUPPCState *env = &cpu->env;
1184     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
1185 
1186     if (!pcc->hash64_opts) {
1187         assert(!mmu_is_64bit(env->mmu_model));
1188         return;
1189     }
1190 
1191     cpu->hash64_opts = g_memdup2(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
1192 }
1193 
1194 void ppc_hash64_finalize(PowerPCCPU *cpu)
1195 {
1196     g_free(cpu->hash64_opts);
1197 }
1198 
1199 const PPCHash64Options ppc_hash64_opts_basic = {
1200     .flags = 0,
1201     .slb_size = 64,
1202     .sps = {
1203         { .page_shift = 12, /* 4K */
1204           .slb_enc = 0,
1205           .enc = { { .page_shift = 12, .pte_enc = 0 } }
1206         },
1207         { .page_shift = 24, /* 16M */
1208           .slb_enc = 0x100,
1209           .enc = { { .page_shift = 24, .pte_enc = 0 } }
1210         },
1211     },
1212 };
1213 
1214 const PPCHash64Options ppc_hash64_opts_POWER7 = {
1215     .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
1216     .slb_size = 32,
1217     .sps = {
1218         {
1219             .page_shift = 12, /* 4K */
1220             .slb_enc = 0,
1221             .enc = { { .page_shift = 12, .pte_enc = 0 },
1222                      { .page_shift = 16, .pte_enc = 0x7 },
1223                      { .page_shift = 24, .pte_enc = 0x38 }, },
1224         },
1225         {
1226             .page_shift = 16, /* 64K */
1227             .slb_enc = SLB_VSID_64K,
1228             .enc = { { .page_shift = 16, .pte_enc = 0x1 },
1229                      { .page_shift = 24, .pte_enc = 0x8 }, },
1230         },
1231         {
1232             .page_shift = 24, /* 16M */
1233             .slb_enc = SLB_VSID_16M,
1234             .enc = { { .page_shift = 24, .pte_enc = 0 }, },
1235         },
1236         {
1237             .page_shift = 34, /* 16G */
1238             .slb_enc = SLB_VSID_16G,
1239             .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
1240         },
1241     }
1242 };
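
/*
 * Added note (not in the original file): each sps[] entry above pairs the
 * SLB L||LP encoding of a segment's base page size (.slb_enc, matched
 * against SLB_VSID_LLP_MASK in ppc_store_slb) with the set of actual page
 * sizes permitted inside such a segment (.enc[], whose pte_enc values are
 * matched against the HPTE bits in hpte_page_shift()).
 */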
1243 
1244 
1245