/*
 * QEMU RISC-V CPU
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef RISCV_CPU_H
#define RISCV_CPU_H

#include "hw/core/cpu.h"
#include "hw/registerfields.h"
#include "hw/qdev-properties.h"
#include "exec/cpu-defs.h"
#include "exec/gdbstub.h"
#include "qemu/cpu-float.h"
#include "qom/object.h"
#include "qemu/int128.h"
#include "cpu_bits.h"
#include "cpu_cfg.h"
#include "qapi/qapi-types-common.h"
#include "cpu-qom.h"

typedef struct CPUArchState CPURISCVState;

#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU

#if defined(TARGET_RISCV32)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE32
#elif defined(TARGET_RISCV64)
# define TYPE_RISCV_CPU_BASE            TYPE_RISCV_CPU_BASE64
#endif

/*
 * RISC-V-specific extra insn start words:
 * 1: Original instruction opcode
 * 2: More information about the instruction
 */
#define TARGET_INSN_START_EXTRA_WORDS 2
/*
 * b0: Whether an instruction always raises a store/AMO fault or not.
 */
#define RISCV_UW2_ALWAYS_STORE_AMO 1

#define RV(x) ((target_ulong)1 << (x - 'A'))
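/*
 * RV() maps an extension letter to its bit position in misa relative to
 * 'A': e.g. RV('A') is bit 0, RV('C') is bit 2 and RV('I') is bit 8,
 * matching the misa CSR extension field layout.
 */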

/*
 * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[]
 * when adding new MISA bits here.
 */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVV RV('V')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
#define RVH RV('H')
#define RVJ RV('J')
#define RVG RV('G')
#define RVB RV('B')

extern const uint32_t misa_bits[];
const char *riscv_get_misa_ext_name(uint32_t bit);
const char *riscv_get_misa_ext_description(uint32_t bit);

#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)

typedef struct riscv_cpu_profile {
    struct riscv_cpu_profile *parent;
    const char *name;
    uint32_t misa_ext;
    bool enabled;
    bool user_set;
    int priv_spec;
    int satp_mode;
    const int32_t ext_offsets[];
} RISCVCPUProfile;

#define RISCV_PROFILE_EXT_LIST_END -1
#define RISCV_PROFILE_ATTR_UNUSED -1

extern RISCVCPUProfile *riscv_profiles[];

/* Privileged specification version */
#define PRIV_VER_1_10_0_STR "v1.10.0"
#define PRIV_VER_1_11_0_STR "v1.11.0"
#define PRIV_VER_1_12_0_STR "v1.12.0"
#define PRIV_VER_1_13_0_STR "v1.13.0"
enum {
    PRIV_VERSION_1_10_0 = 0,
    PRIV_VERSION_1_11_0,
    PRIV_VERSION_1_12_0,
    PRIV_VERSION_1_13_0,

    PRIV_VERSION_LATEST = PRIV_VERSION_1_13_0,
};

#define VEXT_VERSION_1_00_0 0x00010000
#define VEXT_VER_1_00_0_STR "v1.0"

enum {
    TRANSLATE_SUCCESS,
    TRANSLATE_FAIL,
    TRANSLATE_PMP_FAIL,
    TRANSLATE_G_STAGE_FAIL
};

/* Extension context status */
typedef enum {
    EXT_STATUS_DISABLED = 0,
    EXT_STATUS_INITIAL,
    EXT_STATUS_CLEAN,
    EXT_STATUS_DIRTY,
} RISCVExtStatus;

typedef struct riscv_cpu_implied_exts_rule {
#ifndef CONFIG_USER_ONLY
    /*
     * Bitmask indicating the rule's enabled status for each hart.
     * This enhancement is only available in system-mode QEMU,
     * as we don't have a good way (e.g. mhartid) to distinguish
     * the SMP cores in user-mode QEMU.
     */
    unsigned long *enabled;
#endif
    /* True if this is a MISA implied rule. */
    bool is_misa;
    /* ext is a MISA bit if is_misa is true, else a multi-extension offset. */
    const uint32_t ext;
    const uint32_t implied_misa_exts;
    const uint32_t implied_multi_exts[];
} RISCVCPUImpliedExtsRule;

extern RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[];
extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];

#define RISCV_IMPLIED_EXTS_RULE_END -1

#define MMU_USER_IDX 3

#define MAX_RISCV_PMPS (16)

#if !defined(CONFIG_USER_ONLY)
#include "pmp.h"
#include "debug.h"
#endif

#define RV_VLEN_MAX 1024
#define RV_MAX_MHPMEVENTS 32
#define RV_MAX_MHPMCOUNTERS 32

FIELD(VTYPE, VLMUL, 0, 3)
FIELD(VTYPE, VSEW, 3, 3)
FIELD(VTYPE, VTA, 6, 1)
FIELD(VTYPE, VMA, 7, 1)
FIELD(VTYPE, VEDIV, 8, 2)
FIELD(VTYPE, RESERVED, 10, sizeof(target_ulong) * 8 - 11)
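/*
 * FIELD() also generates R_VTYPE_*_SHIFT/_MASK constants for the fields
 * above, so individual vtype fields can be read with the registerfields
 * helpers, e.g. FIELD_EX64(env->vtype, VTYPE, VSEW).
 */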

typedef struct PMUCTRState {
    /* Current value of a counter */
    target_ulong mhpmcounter_val;
    /* Current value of a counter in RV32 */
    target_ulong mhpmcounterh_val;
    /* Snapshot value of a counter */
    target_ulong mhpmcounter_prev;
    /* Snapshot value of a counter in RV32 */
    target_ulong mhpmcounterh_prev;
    /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
    target_ulong irq_overflow_left;
} PMUCTRState;

typedef struct PMUFixedCtrState {
    /* Track cycle and icount for each privilege mode */
    uint64_t counter[4];
    uint64_t counter_prev[4];
    /* Track cycle and icount for each privilege mode when V = 1 */
    uint64_t counter_virt[2];
    uint64_t counter_virt_prev[2];
} PMUFixedCtrState;

struct CPUArchState {
    target_ulong gpr[32];
    target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */

    /* vector coprocessor state. */
    uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16);
    target_ulong vxrm;
    target_ulong vxsat;
    target_ulong vl;
    target_ulong vstart;
    target_ulong vtype;
    bool vill;

    target_ulong pc;
    target_ulong load_res;
    target_ulong load_val;

    /* Floating-Point state */
    uint64_t fpr[32]; /* assume both F and D extensions */
    target_ulong frm;
    float_status fp_status;

    target_ulong badaddr;
    target_ulong bins;

    target_ulong guest_phys_fault_addr;

    target_ulong priv_ver;
    target_ulong vext_ver;

    /* RISCVMXL, but uint32_t for vmstate migration */
    uint32_t misa_mxl;      /* current mxl */
    uint32_t misa_ext;      /* current extensions */
    uint32_t misa_ext_mask; /* max ext for this cpu */
    uint32_t xl;            /* current xlen */

    /* 128-bit helpers upper part return value */
    target_ulong retxh;

    target_ulong jvt;

    /* elp state for zicfilp extension */
    bool      elp;
    /* shadow stack register for zicfiss extension */
    target_ulong ssp;
    /* env placeholder for extra word 2 during unwind */
    target_ulong excp_uw2;
    /* software check code for the software-check exception */
    target_ulong sw_check_code;
#ifdef CONFIG_USER_ONLY
    uint32_t elf_flags;
#endif

    target_ulong priv;
    /* CSRs for execution environment configuration */
    uint64_t menvcfg;
    target_ulong senvcfg;

#ifndef CONFIG_USER_ONLY
    /* This contains QEMU specific information about the virt state. */
    bool virt_enabled;
    target_ulong geilen;
    uint64_t resetvec;

    target_ulong mhartid;
    /*
     * For RV32 this is 32-bit mstatus and 32-bit mstatush.
     * For RV64 this is a 64-bit mstatus.
     */
    uint64_t mstatus;

    uint64_t mip;
    /*
     * MIP contains the software writable version of SEIP ORed with the
     * external interrupt value. The MIP register is always up-to-date.
     * To keep track of the current source, we also save booleans of the values
     * here.
     */
    bool external_seip;
    bool software_seip;

    uint64_t miclaim;

    uint64_t mie;
    uint64_t mideleg;

    /*
     * When mideleg[i]=0 and mvien[i]=1, sie[i] is no longer an
     * alias of mie[i] and needs to be maintained separately.
     */
    uint64_t sie;

    /*
     * When hideleg[i]=0 and hvien[i]=1, vsie[i] is no longer an
     * alias of sie[i] (mie[i]) and needs to be maintained separately.
     */
    uint64_t vsie;

    target_ulong satp;   /* since: priv-1.10.0 */
    target_ulong stval;
    target_ulong medeleg;

    target_ulong stvec;
    target_ulong sepc;
    target_ulong scause;

    target_ulong mtvec;
    target_ulong mepc;
    target_ulong mcause;
    target_ulong mtval;  /* since: priv-1.10.0 */

    /* Machine and Supervisor interrupt priorities */
    uint8_t miprio[64];
    uint8_t siprio[64];

    /* AIA CSRs */
    target_ulong miselect;
    target_ulong siselect;
    uint64_t mvien;
    uint64_t mvip;

    /* Hypervisor CSRs */
    target_ulong hstatus;
    target_ulong hedeleg;
    uint64_t hideleg;
    uint32_t hcounteren;
    target_ulong htval;
    target_ulong htinst;
    target_ulong hgatp;
    target_ulong hgeie;
    target_ulong hgeip;
    uint64_t htimedelta;
    uint64_t hvien;

    /*
     * Bits VSSIP, VSTIP and VSEIP in hvip are maintained in mip. The other
     * bits from 0:12 are reserved. Bits 13:63 are not aliased and must be
     * maintained separately in hvip.
     */
    uint64_t hvip;

    /* Hypervisor controlled virtual interrupt priorities */
    target_ulong hvictl;
    uint8_t hviprio[64];

    /* Upper 64-bits of 128-bit CSRs */
    uint64_t mscratchh;
    uint64_t sscratchh;

    /* Virtual CSRs */
    /*
     * For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
     * For RV64 this is a 64-bit vsstatus.
     */
    uint64_t vsstatus;
    target_ulong vstvec;
    target_ulong vsscratch;
    target_ulong vsepc;
    target_ulong vscause;
    target_ulong vstval;
    target_ulong vsatp;

    /* AIA VS-mode CSRs */
    target_ulong vsiselect;

    target_ulong mtval2;
    target_ulong mtinst;

    /* HS Backup CSRs */
    target_ulong stvec_hs;
    target_ulong sscratch_hs;
    target_ulong sepc_hs;
    target_ulong scause_hs;
    target_ulong stval_hs;
    target_ulong satp_hs;
    uint64_t mstatus_hs;

    /*
     * Signals whether the current exception occurred with two-stage address
     * translation active.
     */
    bool two_stage_lookup;
    /*
     * Signals whether the current exception occurred while doing two-stage
     * address translation for the VS-stage page table walk.
     */
    bool two_stage_indirect_lookup;

    uint32_t scounteren;
    uint32_t mcounteren;

    uint32_t mcountinhibit;

    /* PMU cycle & instret privilege mode filtering */
    target_ulong mcyclecfg;
    target_ulong mcyclecfgh;
    target_ulong minstretcfg;
    target_ulong minstretcfgh;

    /* PMU counter state */
    PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];

    /* PMU event selector configured values. First three are unused */
    target_ulong mhpmevent_val[RV_MAX_MHPMEVENTS];

    /* PMU event selector configured values for RV32 */
    target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];

    PMUFixedCtrState pmu_fixed_ctrs[2];

    target_ulong sscratch;
    target_ulong mscratch;

    /* Sstc CSRs */
    uint64_t stimecmp;

    uint64_t vstimecmp;

    /* physical memory protection */
    pmp_table_t pmp_state;
    target_ulong mseccfg;

    /* trigger module */
    target_ulong trigger_cur;
    target_ulong tdata1[RV_MAX_TRIGGERS];
    target_ulong tdata2[RV_MAX_TRIGGERS];
    target_ulong tdata3[RV_MAX_TRIGGERS];
    target_ulong mcontext;
    struct CPUBreakpoint *cpu_breakpoint[RV_MAX_TRIGGERS];
    struct CPUWatchpoint *cpu_watchpoint[RV_MAX_TRIGGERS];
    QEMUTimer *itrigger_timer[RV_MAX_TRIGGERS];
    int64_t last_icount;
    bool itrigger_enabled;

    /* machine specific rdtime callback */
    uint64_t (*rdtime_fn)(void *);
    void *rdtime_fn_arg;

    /* machine specific AIA ireg read-modify-write callback */
#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \
    ((((__xlen) & 0xff) << 24) | \
     (((__vgein) & 0x3f) << 20) | \
     (((__virt) & 0x1) << 18) | \
     (((__priv) & 0x3) << 16) | \
     (__isel & 0xffff))
#define AIA_IREG_ISEL(__ireg)                  ((__ireg) & 0xffff)
#define AIA_IREG_PRIV(__ireg)                  (((__ireg) >> 16) & 0x3)
#define AIA_IREG_VIRT(__ireg)                  (((__ireg) >> 18) & 0x1)
#define AIA_IREG_VGEIN(__ireg)                 (((__ireg) >> 20) & 0x3f)
#define AIA_IREG_XLEN(__ireg)                  (((__ireg) >> 24) & 0xff)
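/*
 * For illustration (values are arbitrary): AIA_MAKE_IREG(0x30, PRV_S, true,
 * 1, 64) packs an iselect value together with the privilege, virt, vgein
 * and xlen fields into one word, and AIA_IREG_ISEL()/AIA_IREG_PRIV() etc.
 * recover the individual fields again.
 */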
    int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg,
        target_ulong *val, target_ulong new_val, target_ulong write_mask);
    void *aia_ireg_rmw_fn_arg[4];

    /* True if in debugger mode.  */
    bool debugger;

    /*
     * CSRs for PointerMasking extension
     */
    target_ulong mmte;
    target_ulong mpmmask;
    target_ulong mpmbase;
    target_ulong spmmask;
    target_ulong spmbase;
    target_ulong upmmask;
    target_ulong upmbase;

    uint64_t mstateen[SMSTATEEN_MAX_COUNT];
    uint64_t hstateen[SMSTATEEN_MAX_COUNT];
    uint64_t sstateen[SMSTATEEN_MAX_COUNT];
    uint64_t henvcfg;
#endif
    target_ulong cur_pmmask;
    target_ulong cur_pmbase;

    /* Fields from here on are preserved across CPU reset. */
    QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
    QEMUTimer *vstimer; /* Internal timer for VS-mode interrupt */
    bool vstime_irq;

    hwaddr kernel_addr;
    hwaddr fdt_addr;

#ifdef CONFIG_KVM
    /* kvm timer */
    bool kvm_timer_dirty;
    uint64_t kvm_timer_time;
    uint64_t kvm_timer_compare;
    uint64_t kvm_timer_state;
    uint64_t kvm_timer_frequency;
#endif /* CONFIG_KVM */
};

/*
 * RISCVCPU:
 * @env: #CPURISCVState
 *
 * A RISCV CPU.
 */
struct ArchCPU {
    CPUState parent_obj;

    CPURISCVState env;

    GDBFeature dyn_csr_feature;
    GDBFeature dyn_vreg_feature;

    /* Configuration Settings */
    RISCVCPUConfig cfg;

    QEMUTimer *pmu_timer;
    /* A bitmask of available programmable counters */
    uint32_t pmu_avail_ctrs;
    /* Mapping of events to counters */
    GHashTable *pmu_event_ctr_map;
    const GPtrArray *decoders;
};

/**
 * RISCVCPUClass:
 * @parent_realize: The parent class' realize handler.
 * @parent_phases: The parent class' reset phase handlers.
 *
 * A RISCV CPU model.
 */
struct RISCVCPUClass {
    CPUClass parent_class;

    DeviceRealize parent_realize;
    ResettablePhases parent_phases;
    uint32_t misa_mxl_max;  /* max mxl for this cpu */
};

static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
    return (env->misa_ext & ext) != 0;
}

#include "cpu_user.h"

extern const char * const riscv_int_regnames[];
extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];

const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                               int cpuid, DumpState *s);
int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero);
uint8_t riscv_cpu_default_priority(int irq);
uint64_t riscv_cpu_all_pending(CPURISCVState *env);
int riscv_cpu_mirq_pending(CPURISCVState *env);
int riscv_cpu_sirq_pending(CPURISCVState *env);
int riscv_cpu_vsirq_pending(CPURISCVState *env);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
bool cpu_get_fcfien(CPURISCVState *env);
bool cpu_get_bcfien(CPURISCVState *env);
G_NORETURN void  riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                               MMUAccessType access_type,
                                               int mmu_idx, uintptr_t retaddr);
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
int riscv_cpu_max_xlen(RISCVCPUClass *mcc);
bool riscv_cpu_option_set(const char *optname);

#ifndef CONFIG_USER_ONLY
void riscv_cpu_do_interrupt(CPUState *cpu);
void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename);
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
                              uint64_t value);
void riscv_cpu_interrupt(CPURISCVState *env);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
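/*
 * BOOL_TO_MASK(true) evaluates to an all-ones mask and BOOL_TO_MASK(false)
 * to 0, so a boolean level can set or clear all of the selected mip bits
 * in a single riscv_cpu_update_mip() call.
 */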
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg);
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg);

RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
#endif /* !CONFIG_USER_ONLY */

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);

void riscv_translate_init(void);
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                      uint32_t exception, uintptr_t pc);

target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);

#include "exec/cpu-all.h"

FIELD(TB_FLAGS, MEM_IDX, 0, 3)
FIELD(TB_FLAGS, FS, 3, 2)
/* Vector flags */
FIELD(TB_FLAGS, VS, 5, 2)
FIELD(TB_FLAGS, LMUL, 7, 3)
FIELD(TB_FLAGS, SEW, 10, 3)
FIELD(TB_FLAGS, VL_EQ_VLMAX, 13, 1)
FIELD(TB_FLAGS, VILL, 14, 1)
FIELD(TB_FLAGS, VSTART_EQ_ZERO, 15, 1)
/* The combination of MXL/SXL/UXL that applies to the current cpu mode. */
FIELD(TB_FLAGS, XL, 16, 2)
/* If PointerMasking should be applied */
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
FIELD(TB_FLAGS, VTA, 20, 1)
FIELD(TB_FLAGS, VMA, 21, 1)
/* Native debug itrigger */
FIELD(TB_FLAGS, ITRIGGER, 22, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
FIELD(TB_FLAGS, PRIV, 24, 2)
FIELD(TB_FLAGS, AXL, 26, 2)
/* zicfilp needs a TB flag to track indirect branches */
FIELD(TB_FLAGS, FCFI_ENABLED, 28, 1)
FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 29, 1)
/* zicfiss needs a TB flag so that the correct TB is located via tb flags */
FIELD(TB_FLAGS, BCFI_ENABLED, 30, 1)

#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env)
{
    return env->misa_mxl;
}
#endif
#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env)))
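/*
 * With the misa.MXL encoding (MXL_RV32 == 1, MXL_RV64 == 2, MXL_RV128 == 3),
 * riscv_cpu_mxl_bits() evaluates to 32, 64 or 128 respectively.
 */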

static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
{
    return &env_archcpu(env)->cfg;
}

#if !defined(CONFIG_USER_ONLY)
static inline int cpu_address_mode(CPURISCVState *env)
{
    int mode = env->priv;

    if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
    }
    return mode;
}

static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
{
    RISCVMXL xl = env->misa_mxl;
    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (mode) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }
    return xl;
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_recompute_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_get_xl(env, env->priv);
#else
    return env->misa_mxl;
#endif
}
#endif

#if defined(TARGET_RISCV32)
#define cpu_address_xl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->xl;
#else
    int mode = cpu_address_mode(env);

    return cpu_get_xl(env, mode);
#endif
}
#endif

static inline int riscv_cpu_xlen(CPURISCVState *env)
{
    return 16 << env->xl;
}
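
/*
 * env->xl caches the effective XLEN for the current mode as a RISCVMXL
 * value, so e.g. MXL_RV32 (1) yields 16 << 1 == 32 and MXL_RV64 (2)
 * yields 16 << 2 == 64.
 */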

#ifdef TARGET_RISCV32
#define riscv_cpu_sxl(env)  ((void)(env), MXL_RV32)
#else
static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
{
#ifdef CONFIG_USER_ONLY
    return env->misa_mxl;
#else
    if (env->misa_mxl != MXL_RV32) {
        return get_field(env->mstatus, MSTATUS64_SXL);
    }
#endif
    return MXL_RV32;
}
#endif

/*
 * Encode LMUL to lmul as follows:
 *     LMUL    vlmul    lmul
 *      1       000       0
 *      2       001       1
 *      4       010       2
 *      8       011       3
 *      -       100       -
 *     1/8      101      -3
 *     1/4      110      -2
 *     1/2      111      -1
 *
 * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
 * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
 *      => VLMAX = vlen >> (1 + 3 - (-3))
 *               = 256 >> 7
 *               = 2
 */
static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
                                      int8_t lmul)
{
    uint32_t vlen = vlenb << 3;

    /*
     * We need to use 'vlen' instead of 'vlenb' to
     * preserve the '+ 3' in the formula. Otherwise
     * we risk a negative shift if vsew < lmul.
     */
    return vlen >> (vsew + 3 - lmul);
}
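
/*
 * Another worked example: vlenb = 16 (VLEN = 128 bits), vsew = 2 (SEW = 32)
 * and lmul = 0 (LMUL = 1) give VLMAX = 128 >> (2 + 3 - 0) = 4.
 */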

void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
                          uint64_t *cs_base, uint32_t *pflags);

void riscv_cpu_update_mask(CPURISCVState *env);
bool riscv_cpu_is_32bit(RISCVCPU *cpu);

RISCVException riscv_csrr(CPURISCVState *env, int csrno,
                          target_ulong *ret_value);
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask);
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask);

static inline void riscv_csr_write(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS));
}

static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
{
    target_ulong val = 0;
    riscv_csrrw(env, csrno, &val, 0, 0);
    return val;
}
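
/*
 * Typical usage, with a CSR number from cpu_bits.h (shown purely as an
 * illustration):
 *
 *     riscv_csr_write(env, CSR_SSCRATCH, 0);
 *     target_ulong val = riscv_csr_read(env, CSR_SSCRATCH);
 *
 * Both helpers go through riscv_csrrw(), so the per-CSR predicate and
 * read/write callbacks registered in csr_ops[] still apply.
 */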

typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env,
                                                 int csrno);
typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno,
                                            target_ulong *ret_value);
typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno,
                                             target_ulong new_value);
typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                          target_ulong *ret_value,
                                          target_ulong new_value,
                                          target_ulong write_mask);

RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
                               Int128 *ret_value);
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask);

typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
                                               Int128 *ret_value);
typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
                                                Int128 new_value);

typedef struct {
    const char *name;
    riscv_csr_predicate_fn predicate;
    riscv_csr_read_fn read;
    riscv_csr_write_fn write;
    riscv_csr_op_fn op;
    riscv_csr_read128_fn read128;
    riscv_csr_write128_fn write128;
    /* The default priv spec version should be PRIV_VERSION_1_10_0 (i.e. 0) */
    uint32_t min_priv_ver;
} riscv_csr_operations;

/* CSR function table constants */
enum {
    CSR_TABLE_SIZE = 0x1000
};

/*
 * The event IDs are encoded based on the encoding specified in the
 * SBI specification v0.3
 */

enum riscv_pmu_event_idx {
    RISCV_PMU_EVENT_HW_CPU_CYCLES = 0x01,
    RISCV_PMU_EVENT_HW_INSTRUCTIONS = 0x02,
    RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS = 0x10019,
    RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS = 0x1001B,
    RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS = 0x10021,
};

/* used by tcg/tcg-cpu.c */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en);
bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset);
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext);
bool riscv_cpu_is_vendor(Object *cpu_obj);

typedef struct RISCVCPUMultiExtConfig {
    const char *name;
    uint32_t offset;
    bool enabled;
} RISCVCPUMultiExtConfig;

extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[];
extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[];
extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[];
extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[];

typedef struct isa_ext_data {
    const char *name;
    int min_version;
    int ext_enable_offset;
} RISCVIsaExtData;
extern const RISCVIsaExtData isa_edata_arr[];
char *riscv_cpu_get_name(RISCVCPU *cpu);

void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
void riscv_add_satp_mode_properties(Object *obj);
bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu);

/* CSR function table */
extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE];

extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[];

void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops);
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);

void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);

target_ulong riscv_new_csr_seed(target_ulong new_value,
                                target_ulong write_mask);

uint8_t satp_mode_max_from_map(uint32_t map);
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);

/* Implemented in th_csr.c */
void th_register_custom_csrs(RISCVCPU *cpu);

const char *priv_spec_to_str(int priv_version);
#endif /* RISCV_CPU_H */