/* xref: /qemu/target/riscv/cpu_helper.c (revision b83a80e8) */
/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    uint32_t flags = 0;

    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
    *cs_base = 0;

    if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
        /*
         * If env->vl equals VLMAX, we can use the generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we only set the vl_eq_vlmax
         * flag to true when maxsz >= 8 bytes.
         */
        uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t maxsz = vlmax << sew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                    FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAGS_MSTATUS_FS;
    flags |= TB_FLAGS_MSTATUS_VS;
#else
    flags |= cpu_mmu_index(env, 0);
    if (riscv_cpu_fp_enabled(env)) {
        flags |= env->mstatus & MSTATUS_FS;
    }

    if (riscv_cpu_vector_enabled(env)) {
        flags |= env->mstatus & MSTATUS_VS;
    }

    if (riscv_has_ext(env, RVH)) {
        if (env->priv == PRV_M ||
            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                get_field(env->hstatus, HSTATUS_HU))) {
            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
        }

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
                           get_field(env->mstatus_hs, MSTATUS_FS));

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
                           get_field(env->mstatus_hs, MSTATUS_VS));
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
    }
    if (env->cur_pmbase != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
    }

    *pflags = flags;
}
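
/*
 * Illustrative sketch of the vl_eq_vlmax condition above, in isolation and
 * with hypothetical names: vlenb is VLEN in bytes, vsew encodes
 * SEW = 8 << vsew (so 1 << vsew bytes per element), and frac_lmul_shift is
 * k for a fractional LMUL of 1/2^k.
 */
static inline bool example_vl_fits_gvec(uint32_t vlenb, uint32_t vsew,
                                        uint32_t frac_lmul_shift)
{
    /* vlmax = (VLEN / SEW) * LMUL */
    uint32_t vlmax = (vlenb >> vsew) >> frac_lmul_shift;
    /* total bytes operated on; e.g. vlenb = 16, vsew = 0, LMUL = 1/4
       gives maxsz = 4, below the 8-byte GVEC minimum */
    uint32_t maxsz = vlmax << vsew;

    return maxsz >= 8;
}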

void riscv_cpu_update_mask(CPURISCVState *env)
{
    target_ulong mask = -1, base = 0;
    /*
     * TODO: Current RVJ spec does not specify
     * how the extension interacts with XLEN.
     */
#ifndef CONFIG_USER_ONLY
    if (riscv_has_ext(env, RVJ)) {
        switch (env->priv) {
        case PRV_M:
            if (env->mmte & M_PM_ENABLE) {
                mask = env->mpmmask;
                base = env->mpmbase;
            }
            break;
        case PRV_S:
            if (env->mmte & S_PM_ENABLE) {
                mask = env->spmmask;
                base = env->spmbase;
            }
            break;
        case PRV_U:
            if (env->mmte & U_PM_ENABLE) {
                mask = env->upmmask;
                base = env->upmbase;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
#endif
    if (env->xl == MXL_RV32) {
        env->cur_pmmask = mask & UINT32_MAX;
        env->cur_pmbase = base & UINT32_MAX;
    } else {
        env->cur_pmmask = mask;
        env->cur_pmbase = base;
    }
}
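
/*
 * Illustrative sketch of how a memory access path would consume the cached
 * cur_pmmask/cur_pmbase pair under the draft J (pointer masking) extension.
 * The helper name is hypothetical.
 */
static inline target_ulong example_apply_pointer_mask(target_ulong addr,
                                                      target_ulong pmmask,
                                                      target_ulong pmbase)
{
    /* keep the unmasked bits of the address, and substitute the base
       register for the bits that pointer masking strips out */
    return (addr & pmmask) | pmbase;
}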

#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong virt_enabled = riscv_cpu_virt_enabled(env);

    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);

    target_ulong pending = env->mip & env->mie;

    target_ulong mie    = env->priv < PRV_M ||
                          (env->priv == PRV_M && mstatus_mie);
    target_ulong sie    = env->priv < PRV_S ||
                          (env->priv == PRV_S && mstatus_sie);
    target_ulong hsie   = virt_enabled || sie;
    target_ulong vsie   = virt_enabled && sie;

    target_ulong irqs =
            (pending & ~env->mideleg & -mie) |
            (pending &  env->mideleg & ~env->hideleg & -hsie) |
            (pending &  env->mideleg &  env->hideleg & -vsie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return RISCV_EXCP_NONE; /* indicates no pending interrupt */
    }
}
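
/*
 * Illustrative sketch of the "-mie" idiom used in
 * riscv_cpu_local_irq_pending() above: negating a 0/1 flag broadcasts it
 * to every bit, turning an enable into an all-or-nothing mask that can be
 * ANDed with the pending bits. The helper name is hypothetical.
 */
static inline target_ulong example_bool_to_mask(bool enabled)
{
    /* all bits set when enabled, zero otherwise */
    return -(target_ulong)enabled;
}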

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_VS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
            return false;
        }
        return true;
    }

    return false;
}

void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}

void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);
}

bool riscv_cpu_two_stage_lookup(int mmu_idx)
{
    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}
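
/*
 * Illustrative usage sketch: an interrupt controller model would typically
 * claim the mip bits it drives once, at realize time, so two devices can
 * never own the same line. The helper and its error handling are
 * hypothetical; MIP_MTIP/MIP_MSIP are the usual timer and software
 * interrupt bits.
 */
static void example_claim_timer_irqs(RISCVCPU *cpu)
{
    if (riscv_cpu_claim_interrupts(cpu, MIP_MTIP | MIP_MSIP) < 0) {
        /* another device already claimed these lines: board config bug */
        g_assert_not_reached();
    }
}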

uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t old = env->mip;
    bool locked = false;

    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return old;
}
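
/*
 * Illustrative usage sketch: how a timer model would drive the machine
 * timer interrupt pending bit through riscv_cpu_update_mip(). The helper
 * name and level parameter are hypothetical; only the bit selected by the
 * mask argument is changed.
 */
static void example_timer_set_mtip(RISCVCPU *cpu, bool level)
{
    riscv_cpu_update_mip(cpu, MIP_MTIP, level ? MIP_MTIP : 0);
}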

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}

/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and its
 * TLB page. Returns 0 if the permission checking was successful
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: TLB page size containing addr. It could be modified after PMP
 *            permission checking. NULL if the TLB page for addr need not
 *            be set.
 * @addr: The physical address whose permission is to be checked
 * @size: The size of the access
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
                                    target_ulong *tlb_size, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    target_ulong tlb_size_pmp = 0;

    if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
                            mode)) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);
    if (tlb_size != NULL) {
        if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
            *tlb_size = tlb_size_pmp;
        }
    }

    return TRANSLATE_SUCCESS;
}

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the fault PTE address
 *                  when an error occurs on PTE address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
    bool use_background = false;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!riscv_cpu_virt_enabled(env) && two_stage) {
        use_background = true;
    }

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }
    /* status.SUM is ignored when executing on the background registers
       or when the access comes from the debugger */
    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;

    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true,
                                                 is_debug);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        target_ulong pte;
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        hwaddr ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (A/D bits are not pre-set) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
                        (addr & ~TARGET_PAGE_MASK);

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}
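
/*
 * Illustrative sketch of the fake leaf PTE composition above, isolated for
 * a walk with PGSHIFT == 12. For an Sv39 superpage found at ptshift == 18
 * (a 1 GiB gigapage), the low 18 bits of the virtual page number pass
 * straight through into the physical address. The helper name is
 * hypothetical.
 */
static inline uint64_t example_superpage_pa(uint64_t ppn, uint64_t vaddr,
                                            int ptshift)
{
    uint64_t vpn = vaddr >> 12;
    /* VPN bits below the leaf level are offsets within the superpage */
    uint64_t pass_through = vpn & ((1ULL << ptshift) - 1);

    return ((ppn | pass_through) << 12) | (vaddr & 0xfff);
}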

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions, vm;
    uint64_t satp_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        satp_mode = SATP32_MODE;
    } else {
        satp_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, satp_mode);
    } else {
        vm = get_field(env->hgatp, satp_mode);
    }

    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;

    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env), true)) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(env, cs->exception_index, retaddr);
}

bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
               get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
            two_stage_lookup = true;
        }
    }

    if (riscv_cpu_virt_enabled(env) ||
        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
         access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false);

        /*
         * A G-stage exception may be triggered during two-stage lookup.
         * env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true,
                                       false);

            qemu_log_mask(CPU_LOG_MMU,
                    "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                    TARGET_FMT_plx " prot %d\n",
                    __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                               size, access_type, mode);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                           size, access_type, mode);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            riscv_cpu_virt_enabled(env) ||
                                riscv_cpu_two_stage_lookup(mmu_idx));
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }

    return true;
}
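
/*
 * Illustrative sketch of the guest-physical fault address encoding above:
 * the address reported through htval/mtval2 is the guest physical address
 * shifted right by two, as the hypervisor extension specifies. The helper
 * name is hypothetical.
 */
static inline target_ulong example_gpa_to_htval(hwaddr gpa)
{
    /* e.g. a fault at guest-physical 0x80001000 reports 0x20000400 */
    return (target_ulong)(gpa >> 2);
}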
#endif /* !CONFIG_USER_ONLY */

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool write_gva = false;
    uint64_t s;

    /* cs->exception_index is 32 bits wide, unlike mcause which is XLEN bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (cause == RISCV_EXCP_SEMIHOST) {
        if (env->priv >= PRV_S) {
            env->gpr[xA0] = do_common_semihosting(cs);
            env->pc += 4;
            return;
        }
        cause = RISCV_EXCP_BREAKPOINT;
    }

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_gva = true;
            tval = env->badaddr;
            break;
        case RISCV_EXCP_ILLEGAL_INST:
            tval = env->bins;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));

    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            target_ulong hdeleg = async ? env->hideleg : env->hedeleg;

            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause: yes if it is a VS-mode
                 * interrupt, no if the hypervisor has delegated one of HS
                 * mode's interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                write_gva = false;
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
                write_gva = false;
            }
            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            if (riscv_cpu_virt_enabled(env) && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtval2 = mtval2;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
#endif
    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
}
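
/*
 * Illustrative sketch of the mcause encoding idiom used above, isolated.
 * For an asynchronous trap (async == 1), shifting an all-ones value right
 * by one and inverting it leaves only the MSB, which marks the cause as an
 * interrupt; for a synchronous trap (async == 0) the expression collapses
 * to zero and the cause is stored unchanged. The helper name is
 * hypothetical.
 */
static inline target_ulong example_encode_mcause(target_ulong cause,
                                                 bool async)
{
    return cause | ~(((target_ulong)-1) >> async);
}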
1167