xref: /qemu/target/hppa/mem_helper.c (revision 7c0dfcf9)
/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address.  This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
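    /*
     * Illustrative example (the 40-bit physical address space here is
     * only an assumption for the example): sextract64(addr, 0, 40)
     * keeps the low 40 bits and sign-extends from bit 39, so
     * 0x0000000080001000 is unchanged, while 0x3fffffffff001000 (an
     * I/O address at the top of the 62-bit space) becomes
     * 0xffffffffff001000, keeping I/O at the top of the implemented
     * physical address space.
     */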
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
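    /*
     * Illustrative mapping, again assuming a 40-bit physical address
     * space for the sake of the example:
     *   0x00001000 (memory) -> 0x0000000000001000  (zero-extended)
     *   0xf1000000 (I/O)    -> 0xfffffffff1000000  (sign-extended)
     *   0xf0001234 (PDC)    -> 0xfffffff0f0001234  (offset into PDC space)
     */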
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map with an offset which equals the 32-bit address, which
         * is what can be seen on physical machines too.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}

static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
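        /*
         * The free list is empty: evict a dynamic entry round-robin,
         * skipping over the fixed BTLB slots at the start of env->tlb.
         */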
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Map absolute to physical.  */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
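    /*
     * For illustration: privilege level 0 is the most privileged and 3
     * the least, so e.g. a normal code page (ar_type 2) with ar_pl1 = 3
     * and ar_pl2 = 0 is readable and executable at every privilege level.
     */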
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;
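        /*
         * That is: a PID register whose low bit (the write-disable
         * flag) is set and whose remaining bits equal this page's
         * access id.
         */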

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Instruction Memory Protection
           or Data Memory Access Rights Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these are described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit_restore(cs, retaddr);
}

void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    CPUHPPAState *env = cpu_env(cs);

    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
                " while accessing I/O at %#08" HWADDR_PRIx "\n",
                env->iasq_f, env->iaoq_f, physaddr);

    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
    if (0) {
        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }
}

bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
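/* A PA 1.1 insertion is a two-step sequence: this ITLBA helper latches
   the address pair into env->tlb_partial, and the ITLBP that follows
   supplies the access rights and marks the entry valid.  */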
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

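    /*
     * The low four bits of r1 encode the page size as a power of four
     * pages.  For illustration, assuming 4 KiB target pages,
     * r1 & 0xf == 2 gives mask_shift == 4 and thus a 64 KiB mapping.
     */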
    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
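    /*
     * For illustration, assuming 4 KiB target pages: a low nibble of 2
     * flushes 4^2 == 16 pages, i.e. a 64 KiB range starting at the
     * page-aligned address.
     */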
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(env, start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
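 *
 * The sub-operation is selected by gr[25]: 0 returns the BTLB parameters,
 * 1 inserts an entry, 2 purges a single slot, 3 purges all entries.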
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}