/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "hw/qdev-properties.h"
#include "exec/cpu-defs.h"
#include "exec/gdbstub.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
#include "qemu/int128.h"
#include "cpu_bits.h"
#include "cpu_cfg.h"
#include "qapi/qapi-types-common.h"
#include "cpu-qom.h"

typedef struct CPUArchState CPURISCVState;

#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU

#if defined(TARGET_RISCV32)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
#elif defined(TARGET_RISCV64)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
#endif

#define TCG_GUEST_DEFAULT_MO 0

/*
 * RISC-V-specific extra insn start words:
 * 1: Original instruction opcode
 */
#define TARGET_INSN_START_EXTRA_WORDS 1

#define RV(x) ((target_ulong)1 << (x - 'A'))

/*
 * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
 * when adding new MISA bits here.
 */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVJ RV('J')
#define RVG RV('G')
#define RVB RV('B')
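
/*
 * For example (illustrative), RV('C') expands to
 * (target_ulong)1 << ('C' - 'A'), i.e. bit 2 of misa, and a single
 * extension can be tested with riscv_has_ext(), declared further below:
 *
 *     if (riscv_has_ext(env, RVC)) {
 *         ... compressed instructions are available ...
 *     }
 */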

extern const uint32_t misa_bits[];
const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);

#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)

typedef struct riscv_cpu_profile {
    struct riscv_cpu_profile *parent;
    const char *name;
    uint32_t misa_ext;
    bool enabled;
    bool user_set;
    int priv_spec;
    int satp_mode;
    const int32_t ext_offsets[];
} RISCVCPUProfile;

#define RISCV_PROFILE_EXT_LIST_END -1
#define RISCV_PROFILE_ATTR_UNUSED -1
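
/*
 * A profile is described by its MISA bits plus a list of multi-letter
 * extension offsets terminated by RISCV_PROFILE_EXT_LIST_END. A minimal
 * sketch (the extension choices here are illustrative only):
 *
 *     static RISCVCPUProfile EXAMPLE_PROFILE = {
 *         .name = "example",
 *         .misa_ext = RVI | RVM | RVA,
 *         .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
 *         .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
 *         .ext_offsets = {
 *             CPU_CFG_OFFSET(ext_zicsr),
 *             RISCV_PROFILE_EXT_LIST_END
 *         },
 *     };
 */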

extern RISCVCPUProfile *riscv_profiles[];

/* Privileged specification version */
#define PRIV_VER_1_10_0_STR "v1.10.0"
#define PRIV_VER_1_11_0_STR "v1.11.0"
#define PRIV_VER_1_12_0_STR "v1.12.0"
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_12_0,
};

#define VEXT_VERSION_1_00_0 0x00010000
#define VEXT_VER_1_00_0_STR "v1.0"

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

/* Extension context status */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#include "debug.h"
#endif

#define RV_VLEN_MAX 1024
#define RV_MAX_MHPMEVENTS 32
#define RV_MAX_MHPMCOUNTERS 32

FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
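
/*
 * The FIELD() definitions above can be used with the usual registerfields
 * helpers to decode a raw vtype value, e.g. (illustrative):
 *
 *     uint32_t vsew  = FIELD_EX64(env->vtype, VTYPE, VSEW);
 *     uint32_t vlmul = FIELD_EX64(env->vtype, VTYPE, VLMUL);
 */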

typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot value of a counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    bool started;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;

struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

#ifndef CONFIG_USER_ONLY
    target_ulong priv;
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software-writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save a boolean for each
     * of the two values here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    /*
     * When mideleg[i]=0 and mvien[i]=1, sie[i] is no longer an
     * alias of mie[i] and needs to be maintained separately.
     */
    uint64_t sie;

    /*
     * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no longer an
     * alias of sie[i] (mie[i]) and needs to be maintained separately.
     */
    uint64_t vsie;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;
    uint64_t mvien;
    uint64_t mvip;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    uint32_t hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;
    uint64_t hvien;

    /*
     * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. The other
     * bits in 0:12 are reserved. Bits 13:63 are not aliased and must be
     * maintained separately in hvip.
     */
    uint64_t hvip;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    uint32_t scounteren;
    uint32_t mcounteren;

    uint32_t mcountinhibit;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    target_ulong mcontext;
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
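    /*
     * For example (illustrative), AIA_MAKE_IREG(0x30, PRV_S, 0, 0, 64) packs
     * an iselect value of 0x30 for S-mode with no virtualization and XLEN 64,
     * and AIA_IREG_ISEL()/AIA_IREG_PRIV() recover 0x30 and PRV_S from that
     * word.
     */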
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode.  */
    bool debugger;

    /*
     * CSRs for PointerMasking extension
     */
    target_ulong mmte;
    target_ulong mpmmask;
    target_ulong mpmbase;
    target_ulong spmmask;
    target_ulong spmbase;
    target_ulong upmmask;
    target_ulong upmbase;

    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    target_ulong senvcfg;
    uint64_t henvcfg;
#endif
    target_ulong cur_pmmask;
    target_ulong cur_pmbase;

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

#ifdef CONFIG_KVM
    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */
};

/*
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISC-V CPU.
 */
struct ArchCPU {
    CPUState parent_obj;

    CPURISCVState env;

    GDBFeature dyn_csr_feature;
    GDBFeature dyn_vreg_feature;

    /* Configuration Settings */
    RISCVCPUConfig cfg;

    QEMUTimer *pmu_timer;
    /* A bitmask of available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
};

/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 *
 * A RISC-V CPU model.
 */
struct RISCVCPUClass {
    CPUClass parent_class;

    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
    uint32_t misa_mxl_max;  /* max mxl for this cpu */
};

static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return (env->misa_ext & ext) != 0;
}

#include "cpu_user.h"

extern const char * const riscv_int_regnames[];
extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
void riscv_cpu_do_interrupt(CPUState *cpu);
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
uint64_t riscv_cpu_all_pending(CPURISCVState *env);
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
G_NORETURN void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                               MMUAccessType access_type,
                                               int mmu_idx, uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
bool riscv_cpu_option_set(const char *optname);

#ifndef CONFIG_USER_ONLY
void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
                              uint64_t value);
void riscv_cpu_interrupt(CPURISCVState *env);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
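/*
 * BOOL_TO_MASK() turns a boolean level into an all-ones/all-zeroes mask,
 * so an IRQ line update might look like (illustrative):
 *
 *     riscv_cpu_update_mip(env, MIP_SSIP, BOOL_TO_MASK(level));
 */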
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);

RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
#endif
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);

void riscv_translate_init(void);
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      uint32_t exception, uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

#include "exec/cpu-all.h"

FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
FIELD(TB_FLAGS, VS, 5, 2)
FIELD(TB_FLAGS, LMUL, 7, 3)
FIELD(TB_FLAGS, SEW, 10, 3)
FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
FIELD(TB_FLAGS, VILL, 14, 1)
FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 16, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
FIELD(TB_FLAGS, VTA, 20, 1)
FIELD(TB_FLAGS, VMA, 21, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 22, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
FIELD(TB_FLAGS, PRIV, 24, 2)
FIELD(TB_FLAGS, AXL, 26, 2)
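
/*
 * A translator can pull individual fields back out of the flags word
 * produced by cpu_get_tb_cpu_state(), e.g. (illustrative, with 'tb_flags'
 * standing for that word):
 *
 *     uint32_t mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX);
 *     bool virt        = FIELD_EX32(tb_flags, TB_FLAGS, VIRT_ENABLED);
 */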

#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
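/*
 * Number of bits in the current MXLEN: MXL_RV32 is 1, MXL_RV64 is 2 and
 * MXL_RV128 is 3, so 1UL << (4 + mxl) yields 32, 64 or 128 respectively.
 */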
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))

static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
{
    return &env_archcpu(env)->cfg;
}

#if !defined(CONFIG_USER_ONLY)
static inline int cpu_address_mode(CPURISCVState *env)
{
    int mode = env->priv;

    if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
    }
    return mode;
}

static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
{
    RISCVMXL xl = env->misa_mxl;
    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (mode) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
    return xl;
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    return env->misa_mxl;
#endif
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_address_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif

static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}
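
/*
 * env->xl holds a RISCVMXL value, so riscv_cpu_xlen() evaluates to
 * 16 << 1 == 32 for MXL_RV32 and 16 << 2 == 64 for MXL_RV64.
 */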

#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    return get_field(env->mstatus, MSTATUS64_SXL);
#endif
}
#endif

/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
                                      int8_t lmul)
{
    uint32_t vlen = vlenb << 3;

    /*
     * We need to use 'vlen' instead of 'vlenb' to
     * preserve the '+ 3' in the formula. Otherwise
     * we risk a negative shift if vsew < lmul.
     */
    return vlen >> (vsew + 3 - lmul);
}
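
/*
 * Matching the example in the comment above (illustrative): with
 * vlenb = 32 (VLEN = 256), vsew = 1 (SEW = 16) and lmul = -3 (LMUL = 1/8),
 * vext_get_vlmax(32, 1, -3) returns 256 >> 7 == 2.
 */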

void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
                          uint64_t *cs_base, uint32_t *pflags);

void riscv_cpu_update_mask(CPURISCVState *env);
bool riscv_cpu_is_32bit(RISCVCPU *cpu);

RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);

static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}

static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrrw(env, csrno, &val, 0, 0);
    return val;
}
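
/*
 * Typical usage of the wrappers above (illustrative):
 *
 *     target_ulong mstatus = riscv_csr_read(env, CSR_MSTATUS);
 *     riscv_csr_write(env, CSR_MSCRATCH, 0);
 */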

typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                             Int128 new_value);

typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e. 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;
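
/*
 * A csr_ops[] entry wires a CSR number to its handlers; a sketch of what an
 * entry might look like (the predicate/read/write helper names here are
 * purely illustrative):
 *
 *     [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch },
 */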

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/*
 * The event IDs are encoded according to the encoding specified in the
 * SBI specification v0.3.
 */

enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};

/* Used by tcg/tcg-cpu.c */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
bool riscv_cpu_is_vendor(Object *cpu_obj);

typedef struct RISCVCPUMultiExtConfig {
    const char *name;
    uint32_t offset;
    bool enabled;
} RISCVCPUMultiExtConfig;

extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];

typedef struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
} RISCVIsaExtData;
extern const RISCVIsaExtData isa_edata_arr[];
char *riscv_cpu_get_name(RISCVCPU *cpu);

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
void riscv_add_satp_mode_properties(Object *obj);
bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

uint8_t satp_mode_max_from_map(uint32_t map);
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);

#endif /* RISCV_CPU_H */