xref: /qemu/target/sparc/mmu_helper.c (revision 2e8f72ac)
1 /*
2  *  Sparc MMU helpers
3  *
4  *  Copyright (c) 2003-2005 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "qemu/qemu-print.h"
24 #include "trace.h"
25 
26 /* Sparc MMU emulation */
27 
28 #if defined(CONFIG_USER_ONLY)
29 
/*
 * User-mode emulation: there is no MMU, so any TLB-fill request means
 * the guest touched an unmapped address.  Raise the appropriate fault
 * and unwind to the main loop; this function never returns normally.
 */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;

    if (access_type == MMU_INST_FETCH) {
        cs->exception_index = TT_TFAULT;
    } else {
        cs->exception_index = TT_DFAULT;
        /* Record the faulting address in the (D)MMU fault address
           register so the guest fault handler can read it back.  */
#ifdef TARGET_SPARC64
        env->dmmu.mmuregs[4] = address;
#else
        env->mmuregs[4] = address;
#endif
    }
    /* Restore guest state for retaddr and deliver the exception.  */
    cpu_loop_exit_restore(cs, retaddr);
}
49 
50 #else
51 
52 #ifndef TARGET_SPARC64
53 /*
54  * Sparc V8 Reference MMU (SRMMU)
55  */
/*
 * Access check table, indexed by [access_index][PTE ACC field].
 * access_index is ((write << 2) | (fetch << 1) | supervisor) as built
 * in get_physical_address().  0 means the access is permitted;
 * non-zero values are SRMMU fault-status FT codes pre-shifted left
 * by 2 (8 = FT 2 protection error, 12 = FT 3 privilege violation),
 * ready to be OR'ed into the fault status register.
 */
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 8, 0, 12, 12 },
    { 0, 0, 0, 0, 8, 0, 0, 0 },
    { 8, 8, 0, 0, 0, 8, 12, 12 },
    { 8, 8, 0, 0, 0, 8, 0, 0 },
    { 8, 0, 8, 0, 8, 8, 12, 12 },
    { 8, 0, 8, 0, 8, 0, 8, 0 },
    { 8, 8, 8, 0, 8, 8, 12, 12 },
    { 8, 8, 8, 0, 8, 8, 8, 0 }
};
66 
/*
 * QEMU page protection bits for each SRMMU PTE ACC value, indexed by
 * [is_user][ACC].  Row 0 is the supervisor view, row 1 the user view;
 * ACC values 6 and 7 are supervisor-only, hence 0 in the user row.
 */
static const int perm_table[2][8] = {
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC
    },
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ,
        0,
        0,
    }
};
89 
/*
 * Translate "address" through the SPARC V8 reference MMU (SRMMU) page
 * tables: Context table -> L1 -> L2 -> L3 PTE, with large-page PTEs
 * possible at each intermediate level.
 *
 * On success returns 0 and fills in *physical, *prot, *access_index
 * and *page_size.  On failure returns a fault status value of the
 * form (level << 8) | (fault_type << 2), suitable for OR'ing into
 * the fault status register (mmuregs[3]).
 *
 * rw encoding: bit 0 set = write, rw == 2 = instruction fetch.
 */
static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
                                int *prot, int *access_index, MemTxAttrs *attrs,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    int access_perms = 0;
    hwaddr pde_ptr;
    uint32_t pde;
    int error_code = 0, is_dirty, is_user;
    unsigned long page_offset;
    CPUState *cs = env_cpu(env);
    MemTxResult result;

    is_user = mmu_idx == MMU_USER_IDX;

    if (mmu_idx == MMU_PHYS_IDX) {
        /* MMU disabled: identity mapping.  */
        *page_size = TARGET_PAGE_SIZE;
        /* Boot mode: instruction fetches are taken from PROM */
        if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) {
            *physical = env->prom_addr | (address & 0x7ffffULL);
            *prot = PAGE_READ | PAGE_EXEC;
            return 0;
        }
        *physical = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    /* Build the access_table row: write/fetch/supervisor bits.  */
    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
    *physical = 0xffffffffffff0000ULL;

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        return 4 << 2; /* Translation fault, L = 0 */
    }

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = address_space_ldl(cs->as, pde_ptr,
                                MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            return (1 << 8) | (4 << 2); /* Translation fault, L = 1 */
        }

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = address_space_ldl(cs->as, pde_ptr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                return (2 << 8) | (4 << 2); /* Translation fault, L = 2 */
            }

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = address_space_ldl(cs->as, pde_ptr,
                                        MEMTXATTRS_UNSPECIFIED, &result);
                if (result != MEMTX_OK) {
                    return (3 << 8) | (4 << 2); /* Translation fault, L = 3 */
                }

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    page_offset = 0;
                }
                *page_size = TARGET_PAGE_SIZE;
                break;
            case 2: /* L2 PTE (256KB large page) */
                page_offset = address & 0x3f000;
                *page_size = 0x40000;
            }
            break;
        case 2: /* L1 PTE (16MB large page) */
            page_offset = address & 0xfff000;
            *page_size = 0x1000000;
        }
    }

    /* check access */
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
        return error_code;
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pde |= PG_MODIFIED_MASK;
        }
        /* Write back R/M bits without marking the page dirty in QEMU.  */
        stl_phys_notdirty(cs->as, pde_ptr, pde);
    }

    /* the page can be put in the TLB */
    *prot = perm_table[is_user][access_perms];
    if (!(pde & PG_MODIFIED_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        *prot &= ~PAGE_WRITE;
    }

    /* Even if large ptes, we map only one 4KB page in the cache to
       avoid filling it too fast */
    *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}
226 
/*
 * Perform address translation for a sparc32 guest.  On success the
 * mapping is inserted into the QEMU TLB and true is returned.  On a
 * fault the SRMMU fault status/address registers are updated; in
 * no-fault mode (or with traps disabled) a permissive mapping is
 * installed instead of trapping, otherwise the fault is raised and
 * this function does not return.
 */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    hwaddr paddr;
    target_ulong vaddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;
    MemTxAttrs attrs = {};

    /*
     * TODO: If we ever need tlb_vaddr_to_host for this target,
     * then we must figure out how to manipulate FSR and FAR
     * when both MMU_NF and probe are set.  In the meantime,
     * do not support this use case.
     */
    assert(!probe);

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs,
                                      address, access_type,
                                      mmu_idx, &page_size);
    vaddr = address;
    if (likely(error_code == 0)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "Translate at %" VADDR_PRIx " -> "
                      TARGET_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
                      address, paddr, vaddr);
        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
        return true;
    }

    if (env->mmuregs[3]) { /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    }
    /* FSR: access type, fault type from the walk, and FAV bit.  */
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0)  {
        /* No fault mode: if a mapping is available, just override
           permissions. If no mapping is available, redirect accesses to
           neverland. Fake/overridden mappings will be flushed when
           switching to normal mode. */
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    } else {
        if (access_type == MMU_INST_FETCH) {
            cs->exception_index = TT_TFAULT;
        } else {
            cs->exception_index = TT_DFAULT;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }
}
285 
/*
 * Walk the SRMMU page tables for "address" and return the raw PDE/PTE
 * found at the requested depth: mmulev 3 stops at the context-table
 * entry, 2 at the level-1 entry, 1 at level 2, and 0 descends to the
 * level-3 PTE.  Returns 0 if the walk hits an invalid or reserved
 * entry, or if a table load fails.
 */
target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
{
    CPUState *cs = env_cpu(env);
    hwaddr pde_ptr;
    uint32_t pde;
    MemTxResult result;

    /*
     * TODO: MMU probe operations are supposed to set the fault
     * status registers, but we don't do this.
     */

    /* Context base + context number */
    pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
        (env->mmuregs[2] << 2);
    pde = address_space_ldl(cs->as, pde_ptr, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        return 0;
    }

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3) {
            return pde;
        }
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = address_space_ldl(cs->as, pde_ptr,
                                MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            return 0;
        }

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2) {
                return pde;
            }
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = address_space_ldl(cs->as, pde_ptr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
            if (result != MEMTX_OK) {
                return 0;
            }

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1) {
                    return pde;
                }
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = address_space_ldl(cs->as, pde_ptr,
                                        MEMTXATTRS_UNSPECIFIED, &result);
                if (result != MEMTX_OK) {
                    return 0;
                }

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}
373 
/*
 * Dump the SRMMU state: the context-table root pointer, current
 * context, and every valid mapping found by probing the tables at
 * 16MB, 256KB and 4KB granularity.
 */
void dump_mmu(CPUSPARCState *env)
{
    CPUState *cs = env_cpu(env);
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    hwaddr pa;
    uint32_t pde;

    qemu_printf("Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
                (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
    /* 256 level-1 entries, 16MB of address space each.  */
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde = mmu_probe(env, va, 2);
        if (pde) {
            pa = cpu_get_phys_page_debug(cs, va);
            qemu_printf("VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
                        " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
            /* 64 level-2 entries, 256KB each.  */
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde = mmu_probe(env, va1, 1);
                if (pde) {
                    pa = cpu_get_phys_page_debug(cs, va1);
                    qemu_printf(" VA: " TARGET_FMT_lx ", PA: "
                                TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
                                va1, pa, pde);
                    /* 64 level-3 entries, 4KB pages.  */
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde = mmu_probe(env, va2, 0);
                        if (pde) {
                            pa = cpu_get_phys_page_debug(cs, va2);
                            qemu_printf("  VA: " TARGET_FMT_lx ", PA: "
                                        TARGET_FMT_plx " PTE: "
                                        TARGET_FMT_lx "\n",
                                        va2, pa, pde);
                        }
                    }
                }
            }
        }
    }
}
412 
/*
 * Gdb expects all register windows to be flushed to ram.  This function
 * handles reads (and only reads) in stack frames as if windows were
 * flushed: bytes that fall inside an unflushed window's save area are
 * served from the in-CPU window registers instead of memory.  We assume
 * that the sparc ABI is followed (%fp chains stack frames, locals and
 * ins saved at %fp..%fp+63).  Returns 0 on success, -1 on error.
 */
int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
                              uint8_t *buf, int len, bool is_write)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong addr = address;
    int i;
    int len1;
    int cwp = env->cwp;

    if (!is_write) {
        /* Walk the window chain from the current window outwards.  */
        for (i = 0; i < env->nwindows; i++) {
            int off;
            /* regbase[cwp*16 + 22] is this window's frame pointer.  */
            target_ulong fp = env->regbase[cwp * 16 + 22];

            /* Assume fp == 0 means end of frame.  */
            if (fp == 0) {
                break;
            }

            cwp = cpu_cwp_inc(env, cwp + 1);

            /* Invalid window ? */
            if (env->wim & (1 << cwp)) {
                break;
            }

            /* According to the ABI, the stack is growing downward.  */
            if (addr + len < fp) {
                break;
            }

            /* Not in this frame.  */
            if (addr > fp + 64) {
                continue;
            }

            /* Handle access before this window.  */
            if (addr < fp) {
                len1 = fp - addr;
                if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
                    return -1;
                }
                addr += len1;
                len -= len1;
                buf += len1;
            }

            /* Access byte per byte to registers. Not very efficient but speed
             * is not critical.
             */
            off = addr - fp;
            len1 = 64 - off;

            if (len1 > len) {
                len1 = len;
            }

            for (; len1; len1--) {
                /* Window registers start at regbase[cwp*16 + 8].  */
                int reg = cwp * 16 + 8 + (off >> 2);
                union {
                    uint32_t v;
                    uint8_t c[4];
                } u;
                u.v = cpu_to_be32(env->regbase[reg]);
                *buf++ = u.c[off & 3];
                addr++;
                len--;
                off++;
            }

            if (len == 0) {
                return 0;
            }
        }
    }
    /* Whatever remains is read/written through normal memory.  */
    return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
}
495 
496 #else /* !TARGET_SPARC64 */
497 
498 /* 41 bit physical address space */
499 static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
500 {
501     return x & 0x1ffffffffffULL;
502 }
503 
504 /*
505  * UltraSparc IIi I/DMMUs
506  */
507 
/*
 * Return non-zero when the TTE is valid and its tag matches "address"
 * in "context" (or the TTE is global); on a match, decode the physical
 * address into *physical.  The comparison mask is derived from the TTE
 * page-size field (8k << 3*n: 8k, 64k, 512k or 4M).
 */
static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
                                       uint64_t address, uint64_t context,
                                       hwaddr *physical)
{
    /* -(page size): all-ones mask above the in-page offset bits.  */
    uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte));

    /* valid, context match, virtual address match? */
    if (TTE_IS_VALID(tlb->tte) &&
        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
        && compare_masked(address, tlb->tag, mask)) {
        /* decode physical address: PA bits from the TTE plus the page
           offset from the virtual address, truncated to 41 bits.  */
        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
        return 1;
    }

    return 0;
}
528 
/*
 * Translate a data access through the UltraSPARC D-MMU TLB.
 *
 * rw encoding: 1 = write, 4 = no-fault (non-faulting ASI) read,
 * otherwise a normal read.  Returns 0 on success with *physical,
 * *prot and *attrs filled in; returns 1 on failure with
 * cs->exception_index and the D-MMU fault registers set up.
 */
static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical,
                                     int *prot, MemTxAttrs *attrs,
                                     target_ulong address, int rw, int mmu_idx)
{
    CPUState *cs = env_cpu(env);
    unsigned int i;
    uint64_t context;
    uint64_t sfsr = 0;
    bool is_user = false;

    /* Select the context and record the context type in the SFSR.  */
    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        sfsr |= SFSR_CT_PRIMARY;
        break;
    case MMU_USER_SECONDARY_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_SECONDARY_IDX:
        context = env->dmmu.mmu_secondary_context & 0x1fff;
        sfsr |= SFSR_CT_SECONDARY;
        break;
    case MMU_NUCLEUS_IDX:
        sfsr |= SFSR_CT_NUCLEUS;
        /* FALLTHRU */
    default:
        context = 0;
        break;
    }

    if (rw == 1) {
        sfsr |= SFSR_WRITE_BIT;
    } else if (rw == 4) {
        sfsr |= SFSR_NF_BIT;
    }

    /* Linear search of the 64-entry D-TLB.  */
    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
            int do_fault = 0;

            /* Invert-endianness bit: byteswap accesses to this page.  */
            if (TTE_IS_IE(env->dtlb[i].tte)) {
                attrs->byte_swap = true;
            }

            /* access ok? */
            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
                do_fault = 1;
                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
                trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
            }
            if (rw == 4) {
                /* No-fault loads must not touch side-effect pages.  */
                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NF_E_BIT;
                }
            } else {
                /* Normal accesses must not touch NFO-only pages.  */
                if (TTE_IS_NFO(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NFO_BIT;
                }
            }

            if (do_fault) {
                /* faults above are reported with TT_DFAULT. */
                cs->exception_index = TT_DFAULT;
            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
                /* Write to a read-only page: protection trap instead.  */
                do_fault = 1;
                cs->exception_index = TT_DPROT;

                trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
            }

            if (!do_fault) {
                *prot = PAGE_READ;
                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
                    *prot |= PAGE_WRITE;
                }

                TTE_SET_USED(env->dtlb[i].tte);

                return 0;
            }

            if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
                sfsr |= SFSR_OW_BIT; /* overflow (not read before
                                        another fault) */
            }

            if (env->pstate & PS_PRIV) {
                sfsr |= SFSR_PR_BIT;
            }

            /* FIXME: ASI field in SFSR must be set */
            env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;

            env->dmmu.sfar = address; /* Fault address register */

            env->dmmu.tag_access = (address & ~0x1fffULL) | context;

            return 1;
        }
    }

    trace_mmu_helper_dmiss(address, context);

    /*
     * On MMU misses:
     * - UltraSPARC IIi: SFSR and SFAR unmodified
     * - JPS1: SFAR updated and some fields of SFSR updated
     */
    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_DMISS;
    return 1;
}
650 
/*
 * Translate an instruction fetch through the UltraSPARC I-MMU TLB.
 * Returns 0 on success with *physical and *prot filled in; returns 1
 * on failure with cs->exception_index and the I-MMU registers set up.
 */
static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical,
                                     int *prot, MemTxAttrs *attrs,
                                     target_ulong address, int mmu_idx)
{
    CPUState *cs = env_cpu(env);
    unsigned int i;
    uint64_t context;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }

    /*
     * NOTE(review): this unconditionally overwrites the context chosen
     * by the switch above (which then only determines is_user) — the
     * effective context depends solely on the trap level.
     */
    if (env->tl == 0) {
        /* PRIMARY context */
        context = env->dmmu.mmu_primary_context & 0x1fff;
    } else {
        /* NUCLEUS context */
        context = 0;
    }

    /* Linear search of the 64-entry I-TLB.  */
    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->itlb[i],
                                 address, context, physical)) {
            /* access ok? */
            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
                /* Fault status register */
                if (env->immu.sfsr & SFSR_VALID_BIT) {
                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
                                                     another fault) */
                } else {
                    env->immu.sfsr = 0;
                }
                if (env->pstate & PS_PRIV) {
                    env->immu.sfsr |= SFSR_PR_BIT;
                }
                if (env->tl > 0) {
                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
                }

                /* FIXME: ASI field in SFSR must be set */
                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
                cs->exception_index = TT_TFAULT;

                env->immu.tag_access = (address & ~0x1fffULL) | context;

                trace_mmu_helper_tfault(address, context);

                return 1;
            }
            *prot = PAGE_EXEC;
            TTE_SET_USED(env->itlb[i].tte);
            return 0;
        }
    }

    trace_mmu_helper_tmiss(address, context);

    /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
    env->immu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_TMISS;
    return 1;
}
727 
/*
 * Top-level UltraSPARC translation: dispatch to the I-MMU or D-MMU
 * lookup (rw == 2 means instruction fetch), or bypass translation
 * entirely for MMU_PHYS_IDX.  Return value and outputs are as for
 * get_physical_address_data()/get_physical_address_code().
 */
static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
                                int *prot, int *access_index, MemTxAttrs *attrs,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    /* ??? We treat everything as a small page, then explicitly flush
       everything when an entry is evicted.  */
    *page_size = TARGET_PAGE_SIZE;

    /* safety net to catch wrong softmmu index use from dynamic code */
    if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
        if (rw == 2) {
            trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        } else {
            trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        }
    }

    if (mmu_idx == MMU_PHYS_IDX) {
        /* MMU bypass: truncate to the 41-bit physical address space.  */
        *physical = ultrasparc_truncate_physical(address);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (rw == 2) {
        return get_physical_address_code(env, physical, prot, attrs, address,
                                         mmu_idx);
    } else {
        return get_physical_address_data(env, physical, prot, attrs, address,
                                         rw, mmu_idx);
    }
}
766 
/*
 * Perform address translation for a sparc64 guest.  On success the
 * mapping (with memory attributes, e.g. byte-swap) is inserted into
 * the QEMU TLB.  On failure the MMU helper has already prepared the
 * fault state: return false for probes, otherwise raise the exception
 * (does not return).
 */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong vaddr;
    hwaddr paddr;
    target_ulong page_size;
    MemTxAttrs attrs = {};
    int error_code = 0, prot, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs,
                                      address, access_type,
                                      mmu_idx, &page_size);
    if (likely(error_code == 0)) {
        vaddr = address;

        trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
                                   env->dmmu.mmu_primary_context,
                                   env->dmmu.mmu_secondary_context);

        tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx,
                                page_size);
        return true;
    }
    if (probe) {
        return false;
    }
    /* exception_index was set by get_physical_address_{code,data}.  */
    cpu_loop_exit_restore(cs, retaddr);
}
800 
/*
 * Dump the UltraSPARC MMU state: current contexts, tag access/target
 * registers, and every valid D-TLB and I-TLB entry with its decoded
 * page size and attribute bits.
 */
void dump_mmu(CPUSPARCState *env)
{
    unsigned int i;
    const char *mask;

    qemu_printf("MMU contexts: Primary: %" PRId64 ", Secondary: %"
                PRId64 "\n",
                env->dmmu.mmu_primary_context,
                env->dmmu.mmu_secondary_context);
    qemu_printf("DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64
                "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target);
    if ((env->lsu & DMMU_E) == 0) {
        qemu_printf("DMMU disabled\n");
    } else {
        qemu_printf("DMMU dump\n");
        for (i = 0; i < 64; i++) {
            /* Decode the TTE page-size field for display.  */
            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->dtlb[i].tte)) {
                qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
                            ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n",
                            i,
                            env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                            TTE_PA(env->dtlb[i].tte),
                            mask,
                            TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
                            TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                            TTE_IS_LOCKED(env->dtlb[i].tte) ?
                            "locked" : "unlocked",
                            TTE_IS_IE(env->dtlb[i].tte) ?
                            "yes" : "no",
                            env->dtlb[i].tag & (uint64_t)0x1fffULL,
                            TTE_IS_GLOBAL(env->dtlb[i].tte) ?
                            "global" : "local");
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        qemu_printf("IMMU disabled\n");
    } else {
        qemu_printf("IMMU dump\n");
        for (i = 0; i < 64; i++) {
            /* Decode the TTE page-size field for display.  */
            switch (TTE_PGSIZE(env->itlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->itlb[i].tte)) {
                qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
                            ", %s, %s, %s, ctx %" PRId64 " %s\n",
                            i,
                            env->itlb[i].tag & (uint64_t)~0x1fffULL,
                            TTE_PA(env->itlb[i].tte),
                            mask,
                            TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
                            TTE_IS_LOCKED(env->itlb[i].tte) ?
                            "locked" : "unlocked",
                            env->itlb[i].tag & (uint64_t)0x1fffULL,
                            TTE_IS_GLOBAL(env->itlb[i].tte) ?
                            "global" : "local");
            }
        }
    }
}
888 
889 #endif /* TARGET_SPARC64 */
890 
891 static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
892                                    target_ulong addr, int rw, int mmu_idx)
893 {
894     target_ulong page_size;
895     int prot, access_index;
896     MemTxAttrs attrs = {};
897 
898     return get_physical_address(env, phys, &prot, &access_index, &attrs, addr,
899                                 rw, mmu_idx, &page_size);
900 }
901 
902 #if defined(TARGET_SPARC64)
903 hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
904                                            int mmu_idx)
905 {
906     hwaddr phys_addr;
907 
908     if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
909         return -1;
910     }
911     return phys_addr;
912 }
913 #endif
914 
915 hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
916 {
917     SPARCCPU *cpu = SPARC_CPU(cs);
918     CPUSPARCState *env = &cpu->env;
919     hwaddr phys_addr;
920     int mmu_idx = cpu_mmu_index(env, false);
921 
922     if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
923         if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
924             return -1;
925         }
926     }
927     return phys_addr;
928 }
929 #endif
930