/*
 * PowerPC Radix MMU emulation helpers for QEMU.
 *
 * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "internal.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"

static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
                                                 vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    /* When EA(2:11) are nonzero, raise a segment interrupt */
    if (eaddr & ~R_EADDR_VALID_MASK) {
        return false;
    }

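    /*
     * The two most-significant EA bits (the "quadrant") select which
     * LPID/PID pair qualifies the address: quadrant 0 is application
     * space, quadrants 1 and 2 are hypervisor-only windows into a
     * guest's application and kernel space respectively, and quadrant 3
     * is kernel (or hypervisor) space.
     */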
    if (FIELD_EX64(env->msr, MSR, HV)) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        default:
            g_assert_not_reached();
        }
    } else { /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type,
                                   vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
        break;
    case MMU_DATA_STORE:
    case MMU_DATA_LOAD:
        /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
        break;
    default:
        g_assert_not_reached();
    }
    env->error_code = 0;
}

static inline const char *access_str(MMUAccessType access_type)
{
    return access_type == MMU_DATA_LOAD ? "reading" :
        (access_type == MMU_DATA_STORE ? "writing" : "execute");
}

static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
                                 vaddr eaddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}

static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
                                  vaddr eaddr, hwaddr g_raddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    env->error_code = 0;
    if (cause & DSISR_PRTABLE_FAULT) {
        /* HDSI PRTABLE_FAULT gets the originating access type in error_code */
        env->error_code = access_type;
        access_type = MMU_DATA_LOAD;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" 0x%"
                  HWADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, g_raddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* H Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* H Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDSISR] = cause;
        env->spr[SPR_HDAR] = eaddr;
        env->spr[SPR_ASDR] = g_raddr;
        break;
    default:
        g_assert_not_reached();
    }
}

static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
                                   uint64_t pte, int *fault_cause, int *prot,
                                   int mmu_idx, bool partition_scoped)
{
    CPUPPCState *env = &cpu->env;
    int need_prot;

    /* Check Page Attributes (pte58:59) */
    if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if (!partition_scoped && (pte & R_PTE_EAA_PRIV) &&
        FIELD_EX64(env->msr, MSR, PR)) {
        *prot = 0;
    } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
               partition_scoped) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !MSR_PR && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    need_prot = prot_for_access_type(access_type);
    if (need_prot & ~*prot) { /* Page Protected for that Access */
        *fault_cause |= access_type == MMU_INST_FETCH ? SRR1_NOEXEC_GUARD :
                                                        DSISR_PROTFAULT;
        return true;
    }

    return false;
}

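/*
 * Check whether the Reference (and, for stores, Change) bit is already set
 * in the PTE; return nonzero when it is not, so the caller can raise an
 * interrupt and let the guest set it.
 */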
static int ppc_radix64_check_rc(MMUAccessType access_type, uint64_t pte)
{
    switch (access_type) {
    case MMU_DATA_STORE:
        if (!(pte & R_PTE_C)) {
            break;
        }
        /* fall through */
    case MMU_INST_FETCH:
    case MMU_DATA_LOAD:
        if (!(pte & R_PTE_R)) {
            break;
        }

        /* R/C bits are already set appropriately for this access */
        return 0;
    }

    return 1;
}

static bool ppc_radix64_is_valid_level(int level, int psize, uint64_t nls)
{
    bool ret;

    /*
     * Check if this is a valid level, according to POWER9 and POWER10
     * Processor User's Manuals, sections 4.10.4.1 and 5.10.6.1, respectively:
     * Supported Radix Tree Configurations and Resulting Page Sizes.
     *
     * Note: these checks are specific to POWER9 and POWER10 CPUs. Any future
     * CPU that supports a different Radix MMU configuration will need its
     * own implementation.
     */
    switch (level) {
    case 0: /* Root Page Dir */
        ret = psize == 52 && nls == 13;
        break;
    case 1:
    case 2:
        ret = nls == 9;
        break;
    case 3:
        ret = nls == 9 || nls == 5;
        break;
    default:
        ret = false;
    }

    if (unlikely(!ret)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid radix configuration: "
                      "level %d size %d nls %"PRIu64"\n",
                      level, psize, nls);
    }
    return ret;
}

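/*
 * Fetch the page directory/table entry for eaddr at the current level and
 * consume the current level's address bits from *psize. For a non-leaf
 * entry, also compute the address of the next-level entry. Returns nonzero
 * with *fault_cause set if the entry is invalid.
 */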
static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
                                  uint64_t *pte_addr, uint64_t *nls,
                                  int *psize, uint64_t *pte, int *fault_cause)
{
    uint64_t index, mask, nlb, pde;

    /* Read page <directory/table> entry from guest address space */
    pde = ldq_phys(as, *pte_addr);
    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 1;
    }

    *pte = pde;
    *psize -= *nls;
    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
        *nls = pde & R_PDE_NLS;
        index = eaddr >> (*psize - *nls); /* Shift */
        index &= ((1UL << *nls) - 1); /* Mask */
        nlb = pde & R_PDE_NLB;
        mask = MAKE_64BIT_MASK(0, *nls + 3);
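        /*
         * The next-level table holds 2^nls 8-byte entries, so its base
         * must be aligned to 2^(nls + 3) bytes (e.g. nls = 9 implies a
         * 4KB-sized, 4KB-aligned table).
         */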

        if (nlb & mask) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: misaligned page dir/table base: 0x%" PRIx64
                          " page dir size: 0x%" PRIx64 "\n",
                          __func__, nlb, mask + 1);
            nlb &= ~mask;
        }
        *pte_addr = nlb + index * sizeof(pde);
    }
    return 0;
}

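/*
 * Walk the radix tree from its root until a leaf PTE is reached, narrowing
 * *psize at each level; on success return the leaf PTE, its address and the
 * translated real address.
 */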
static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
                                 uint64_t base_addr, uint64_t nls,
                                 hwaddr *raddr, int *psize, uint64_t *pte,
                                 int *fault_cause, hwaddr *pte_addr)
{
    uint64_t index, pde, rpn, mask;
    int level = 0;

    index = eaddr >> (*psize - nls); /* Shift */
    index &= ((1UL << nls) - 1); /* Mask */
    mask = MAKE_64BIT_MASK(0, nls + 3);

    if (base_addr & mask) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: misaligned page dir base: 0x%" PRIx64
                      " page dir size: 0x%" PRIx64 "\n",
                      __func__, base_addr, mask + 1);
        base_addr &= ~mask;
    }
    *pte_addr = base_addr + index * sizeof(pde);

    do {
        int ret;

        if (!ppc_radix64_is_valid_level(level++, *psize, nls)) {
            *fault_cause |= DSISR_R_BADCONFIG;
            return 1;
        }

        ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
                                     fault_cause);
        if (ret) {
            return ret;
        }
    } while (!(pde & R_PTE_LEAF));

    *pte = pde;
    rpn = pde & R_PTE_RPN;
    mask = (1UL << *psize) - 1;

    /* OR high bits of rpn with low bits of ea to form whole real addr */
    *raddr = (rpn & ~mask) | (eaddr & mask);
    return 0;
}

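/*
 * Minimal sanity checks on a partition table entry: it must describe a
 * radix tree (HR set), LPID 0 is only usable when the CPU itself is in
 * hypervisor mode, and the process table size field (PRTS) must be at
 * least 5.
 */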
static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
{
    CPUPPCState *env = &cpu->env;

    if (!(pate->dw0 & PATE0_HR)) {
        return false;
    }
    if (lpid == 0 && !FIELD_EX64(env->msr, MSR, HV)) {
        return false;
    }
    if ((pate->dw0 & PATE1_R_PRTS) < 5) {
        return false;
    }
    /* More checks ... */
    return true;
}

static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
                                              MMUAccessType orig_access_type,
                                              vaddr eaddr, hwaddr g_raddr,
                                              ppc_v3_pate_t pate,
                                              hwaddr *h_raddr, int *h_prot,
                                              int *h_page_size, bool pde_addr,
                                              int mmu_idx, uint64_t lpid,
                                              bool guest_visible)
{
    MMUAccessType access_type = orig_access_type;
    int fault_cause = 0;
    hwaddr pte_addr;
    uint64_t pte;

    if (pde_addr) {
        /*
         * Translation of process-scoped tables/directories is performed as
         * a read-access.
         */
        access_type = MMU_DATA_LOAD;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, g_raddr);

    *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
    /* No valid pte or access denied due to protection */
    if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
                              pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
                              &pte, &fault_cause, &pte_addr) ||
        ppc_radix64_check_prot(cpu, access_type, pte,
                               &fault_cause, h_prot, mmu_idx, true)) {
        if (pde_addr) { /* address being translated was that of a guest pde */
            fault_cause |= DSISR_PRTABLE_FAULT;
        }
        if (guest_visible) {
            ppc_radix64_raise_hsi(cpu, orig_access_type,
                                  eaddr, g_raddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        if (ppc_radix64_check_rc(access_type, pte)) {
            /*
             * Per ISA 3.1 Book III, 7.5.3 and 7.5.5, failure to set R/C during
             * partition-scoped translation when effLPID = 0 results in normal
             * (non-Hypervisor) Data and Instruction Storage Interrupts
             * respectively.
             *
             * ISA 3.0 is ambiguous about this, but tests on POWER9 hardware
             * seem to exhibit the same behavior.
             */
            if (lpid > 0) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr,
                                      DSISR_ATOMIC_RC);
            } else {
                ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_ATOMIC_RC);
            }
            return 1;
        }
    }

    return 0;
}

/*
 * The spapr vhc has a flat partition scope provided by qemu memory when
 * not nested.
 *
 * When running a nested guest, the addressing is 2-level radix on top of the
 * vhc memory, so it works practically identically to the bare metal 2-level
 * radix. So that code is selected directly. A cleaner and more flexible nested
 * hypervisor implementation would allow the vhc to provide a ->nested_xlate()
 * function but that is not required for the moment.
 */
static bool vhyp_flat_addressing(PowerPCCPU *cpu)
{
    if (cpu->vhyp) {
        return !vhyp_cpu_in_nested(cpu);
    }
    return false;
}

static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
                                            MMUAccessType access_type,
                                            vaddr eaddr, uint64_t pid,
                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
                                            int *g_prot, int *g_page_size,
                                            int mmu_idx, uint64_t lpid,
                                            bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint64_t offset, size, prtb, prtbe_addr, prtbe0, base_addr, nls, index, pte;
    int fault_cause = 0, h_page_size, h_prot;
    hwaddr h_raddr, pte_addr;
    int ret;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u pid %"PRIu64"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, pid);

    prtb = (pate.dw1 & PATE1_R_PRTB);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
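    /* PRTS = n means the process table is 2^(12 + n) bytes */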
    if (prtb & (size - 1)) {
        /* Process Table not properly aligned */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
        }
        return 1;
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
        }
        return 1;
    }
    prtbe_addr = prtb + offset;

    if (vhyp_flat_addressing(cpu)) {
        prtbe0 = ldq_phys(cs->as, prtbe_addr);
    } else {
        /*
         * Process table addresses are subject to partition-scoped
         * translation
         *
         * On a Radix host, the partition-scoped page table for LPID=0
         * is only used to translate the effective addresses of the
         * process table entries.
         */
        /* mmu_idx is 5 because we're translating from hypervisor scope */
        ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                 prtbe_addr, pate, &h_raddr,
                                                 &h_prot, &h_page_size, true,
                                                 5, lpid, guest_visible);
        if (ret) {
            return ret;
        }
        prtbe0 = ldq_phys(cs->as, h_raddr);
    }

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
    base_addr = prtbe0 & PRTBE_R_RPDB;
    nls = prtbe0 & PRTBE_R_RPDS;
    if (FIELD_EX64(env->msr, MSR, HV) || vhyp_flat_addressing(cpu)) {
        /*
         * Can treat process table addresses as real addresses
         */
        ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
                                    nls, g_raddr, g_page_size, &pte,
                                    &fault_cause, &pte_addr);
        if (ret) {
            /* No valid PTE */
            if (guest_visible) {
                ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
            }
            return ret;
        }
    } else {
        uint64_t rpn, mask;
        int level = 0;

        index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
        index &= ((1UL << nls) - 1); /* Mask */
        pte_addr = base_addr + (index * sizeof(pte));

        /*
         * Each process table address is subject to a partition-scoped
         * translation
         */
        do {
            /* mmu_idx is 5 because we're translating from hypervisor scope */
            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     pte_addr, pate, &h_raddr,
                                                     &h_prot, &h_page_size,
                                                     true, 5, lpid,
                                                     guest_visible);
            if (ret) {
                return ret;
            }

            if (!ppc_radix64_is_valid_level(level++, *g_page_size, nls)) {
                fault_cause |= DSISR_R_BADCONFIG;
                ret = 1;
            } else {
                ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK,
                                             &h_raddr, &nls, g_page_size,
                                             &pte, &fault_cause);
            }

            if (ret) {
                /* No valid pte */
                if (guest_visible) {
                    ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
                }
                return ret;
            }
            pte_addr = h_raddr;
        } while (!(pte & R_PTE_LEAF));

        rpn = pte & R_PTE_RPN;
        mask = (1UL << *g_page_size) - 1;

        /* OR high bits of rpn with low bits of ea to form whole real addr */
        *g_raddr = (rpn & ~mask) | (eaddr & mask);
    }

    if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause,
                               g_prot, mmu_idx, false)) {
        /* Access denied due to protection */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        /* R/C bits not appropriately set for access */
        if (ppc_radix64_check_rc(access_type, pte)) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_ATOMIC_RC);
            return 1;
        }
    }

    return 0;
}

/*
 * Radix tree translation is a two-step translation process:
 *
 * 1. Process-scoped translation:   Guest Eff Addr  -> Guest Real Addr
 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
 *
 *                                  MSR[HV]
 *              +-------------+----------------+---------------+
 *              |             |     HV = 0     |     HV = 1    |
 *              +-------------+----------------+---------------+
 *              | Relocation  |    Partition   |      No       |
 *              | = Off       |     Scoped     |  Translation  |
 *  Relocation  +-------------+----------------+---------------+
 *              | Relocation  |   Partition &  |    Process    |
 *              | = On        | Process Scoped |     Scoped    |
 *              +-------------+----------------+---------------+
 */
static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
                                   MMUAccessType access_type, hwaddr *raddr,
                                   int *psizep, int *protp, int mmu_idx,
                                   bool guest_visible)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpid, pid;
    ppc_v3_pate_t pate;
    int psize, prot;
    hwaddr g_raddr;
    bool relocation;

    assert(!(mmuidx_hv(mmu_idx) && cpu->vhyp));

    relocation = !mmuidx_real(mmu_idx);

    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (mmuidx_hv(mmu_idx) || vhyp_flat_addressing(cpu))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                *raddr |= env->spr[SPR_HRMOR];
            }
        }
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec).
     */
    if (guest_visible && !ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode ! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
        if (guest_visible) {
            ppc_radix64_raise_segi(cpu, access_type, eaddr);
        }
        return false;
    }

    /* Get Partition Table */
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc;
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        if (!vhc->get_pate(cpu->vhyp, cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    }

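    /*
     * Start with the widest possible page size and full permissions; each
     * translation step below can only narrow them.
     */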
    *psizep = INT_MAX;
    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /*
     * Perform process-scoped translation if relocation enabled.
     *
     * - Translates an effective address to a host real address in
     *   quadrants 0 and 3 when HV=1.
     *
     * - Translates an effective address to a guest real address.
     */
    if (relocation) {
        int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
                                                   pate, &g_raddr, &prot,
                                                   &psize, mmu_idx, lpid,
                                                   guest_visible);
        if (ret) {
            return false;
        }
        *psizep = MIN(*psizep, psize);
        *protp &= prot;
    } else {
        g_raddr = eaddr & R_EADDR_MASK;
    }

    if (vhyp_flat_addressing(cpu)) {
        *raddr = g_raddr;
    } else {
        /*
         * Perform partition-scoped translation if !HV or HV access to
         * quadrants 1 or 2. Translates a guest real address to a host
         * real address.
         */
        if (lpid || !mmuidx_hv(mmu_idx)) {
            int ret;

            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     g_raddr, pate, raddr,
                                                     &prot, &psize, false,
                                                     mmu_idx, lpid,
                                                     guest_visible);
            if (ret) {
                return false;
            }
            *psizep = MIN(*psizep, psize);
            *protp &= prot;
        } else {
            *raddr = g_raddr;
        }
    }

    return true;
}

bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                       hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                       bool guest_visible)
{
    bool ret = ppc_radix64_xlate_impl(cpu, eaddr, access_type, raddrp,
                                      psizep, protp, mmu_idx, guest_visible);

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u (prot %c%c%c) -> 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx,
                  *protp & PAGE_READ ? 'r' : '-',
                  *protp & PAGE_WRITE ? 'w' : '-',
                  *protp & PAGE_EXEC ? 'x' : '-',
                  *raddrp);

    return ret;
}