xref: /qemu/target/riscv/cpu.c (revision e995d5cc)
1 /*
2  * QEMU RISC-V CPU
3  *
4  * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5  * Copyright (c) 2017-2018 SiFive, Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2 or later, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "pmu.h"
27 #include "internals.h"
28 #include "time_helper.h"
29 #include "exec/exec-all.h"
30 #include "qapi/error.h"
31 #include "qemu/error-report.h"
32 #include "hw/qdev-properties.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/kvm.h"
36 #include "kvm_riscv.h"
37 #include "tcg/tcg.h"
38 
39 /* RISC-V CPU definitions */
40 
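/*
 * Illustrative example: for a QEMU 8.0.0 build the macro below evaluates to
 * (8 << 16) | (0 << 8) | 0, i.e. 0x00080000; the actual value simply tracks
 * the version of QEMU being built.
 */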
41 #define RISCV_CPU_MARCHID   ((QEMU_VERSION_MAJOR << 16) | \
42                              (QEMU_VERSION_MINOR << 8)  | \
43                              (QEMU_VERSION_MICRO))
44 #define RISCV_CPU_MIMPID    RISCV_CPU_MARCHID
45 
46 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
47 
48 struct isa_ext_data {
49     const char *name;
50     bool multi_letter;
51     int min_version;
52     int ext_enable_offset;
53 };
54 
55 #define ISA_EXT_DATA_ENTRY(_name, _m_letter, _min_ver, _prop) \
56 {#_name, _m_letter, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
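/*
 * For example, ISA_EXT_DATA_ENTRY(zba, true, PRIV_VERSION_1_12_0, ext_zba)
 * expands to:
 *     {"zba", true, PRIV_VERSION_1_12_0,
 *      offsetof(struct RISCVCPUConfig, ext_zba)}
 * i.e. each entry records the name, whether it is a multi-letter extension,
 * the minimum privileged-spec version, and the offset of its enable flag
 * within struct RISCVCPUConfig.
 */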
57 
58 /*
59  * The RISC-V specification defines the following rules for ordering
60  * extension names:
61  * 1. All multi-letter extensions must be separated from other multi-letter
62  *    extensions by an underscore.
63  * 2. The first letter following the 'Z' conventionally indicates the most
64  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
65  *    If multiple 'Z' extensions are named, they should be ordered first
66  *    by category, then alphabetically within a category.
67  * 3. Standard supervisor-level extensions (starting with 'S') should be
68  *    listed after standard unprivileged extensions.  If multiple
69  *    supervisor-level extensions are listed, they should be ordered
70  *    alphabetically.
71  * 4. Non-standard extensions (starting with 'X') must be listed after all
72  *    standard extensions. They must be separated from other multi-letter
73  *    extensions by an underscore.
74  */
75 static const struct isa_ext_data isa_edata_arr[] = {
76     ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h),
77     ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_10_0, ext_v),
78     ISA_EXT_DATA_ENTRY(zicond, true, PRIV_VERSION_1_12_0, ext_zicond),
79     ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr),
80     ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei),
81     ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause),
82     ISA_EXT_DATA_ENTRY(zawrs, true, PRIV_VERSION_1_12_0, ext_zawrs),
83     ISA_EXT_DATA_ENTRY(zfh, true, PRIV_VERSION_1_11_0, ext_zfh),
84     ISA_EXT_DATA_ENTRY(zfhmin, true, PRIV_VERSION_1_12_0, ext_zfhmin),
85     ISA_EXT_DATA_ENTRY(zfinx, true, PRIV_VERSION_1_12_0, ext_zfinx),
86     ISA_EXT_DATA_ENTRY(zdinx, true, PRIV_VERSION_1_12_0, ext_zdinx),
87     ISA_EXT_DATA_ENTRY(zba, true, PRIV_VERSION_1_12_0, ext_zba),
88     ISA_EXT_DATA_ENTRY(zbb, true, PRIV_VERSION_1_12_0, ext_zbb),
89     ISA_EXT_DATA_ENTRY(zbc, true, PRIV_VERSION_1_12_0, ext_zbc),
90     ISA_EXT_DATA_ENTRY(zbkb, true, PRIV_VERSION_1_12_0, ext_zbkb),
91     ISA_EXT_DATA_ENTRY(zbkc, true, PRIV_VERSION_1_12_0, ext_zbkc),
92     ISA_EXT_DATA_ENTRY(zbkx, true, PRIV_VERSION_1_12_0, ext_zbkx),
93     ISA_EXT_DATA_ENTRY(zbs, true, PRIV_VERSION_1_12_0, ext_zbs),
94     ISA_EXT_DATA_ENTRY(zk, true, PRIV_VERSION_1_12_0, ext_zk),
95     ISA_EXT_DATA_ENTRY(zkn, true, PRIV_VERSION_1_12_0, ext_zkn),
96     ISA_EXT_DATA_ENTRY(zknd, true, PRIV_VERSION_1_12_0, ext_zknd),
97     ISA_EXT_DATA_ENTRY(zkne, true, PRIV_VERSION_1_12_0, ext_zkne),
98     ISA_EXT_DATA_ENTRY(zknh, true, PRIV_VERSION_1_12_0, ext_zknh),
99     ISA_EXT_DATA_ENTRY(zkr, true, PRIV_VERSION_1_12_0, ext_zkr),
100     ISA_EXT_DATA_ENTRY(zks, true, PRIV_VERSION_1_12_0, ext_zks),
101     ISA_EXT_DATA_ENTRY(zksed, true, PRIV_VERSION_1_12_0, ext_zksed),
102     ISA_EXT_DATA_ENTRY(zksh, true, PRIV_VERSION_1_12_0, ext_zksh),
103     ISA_EXT_DATA_ENTRY(zkt, true, PRIV_VERSION_1_12_0, ext_zkt),
104     ISA_EXT_DATA_ENTRY(zve32f, true, PRIV_VERSION_1_12_0, ext_zve32f),
105     ISA_EXT_DATA_ENTRY(zve64f, true, PRIV_VERSION_1_12_0, ext_zve64f),
106     ISA_EXT_DATA_ENTRY(zve64d, true, PRIV_VERSION_1_12_0, ext_zve64d),
107     ISA_EXT_DATA_ENTRY(zvfh, true, PRIV_VERSION_1_12_0, ext_zvfh),
108     ISA_EXT_DATA_ENTRY(zvfhmin, true, PRIV_VERSION_1_12_0, ext_zvfhmin),
109     ISA_EXT_DATA_ENTRY(zhinx, true, PRIV_VERSION_1_12_0, ext_zhinx),
110     ISA_EXT_DATA_ENTRY(zhinxmin, true, PRIV_VERSION_1_12_0, ext_zhinxmin),
111     ISA_EXT_DATA_ENTRY(smaia, true, PRIV_VERSION_1_12_0, ext_smaia),
112     ISA_EXT_DATA_ENTRY(ssaia, true, PRIV_VERSION_1_12_0, ext_ssaia),
113     ISA_EXT_DATA_ENTRY(sscofpmf, true, PRIV_VERSION_1_12_0, ext_sscofpmf),
114     ISA_EXT_DATA_ENTRY(sstc, true, PRIV_VERSION_1_12_0, ext_sstc),
115     ISA_EXT_DATA_ENTRY(svadu, true, PRIV_VERSION_1_12_0, ext_svadu),
116     ISA_EXT_DATA_ENTRY(svinval, true, PRIV_VERSION_1_12_0, ext_svinval),
117     ISA_EXT_DATA_ENTRY(svnapot, true, PRIV_VERSION_1_12_0, ext_svnapot),
118     ISA_EXT_DATA_ENTRY(svpbmt, true, PRIV_VERSION_1_12_0, ext_svpbmt),
119     ISA_EXT_DATA_ENTRY(xtheadba, true, PRIV_VERSION_1_11_0, ext_xtheadba),
120     ISA_EXT_DATA_ENTRY(xtheadbb, true, PRIV_VERSION_1_11_0, ext_xtheadbb),
121     ISA_EXT_DATA_ENTRY(xtheadbs, true, PRIV_VERSION_1_11_0, ext_xtheadbs),
122     ISA_EXT_DATA_ENTRY(xtheadcmo, true, PRIV_VERSION_1_11_0, ext_xtheadcmo),
123     ISA_EXT_DATA_ENTRY(xtheadcondmov, true, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
124     ISA_EXT_DATA_ENTRY(xtheadfmemidx, true, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
125     ISA_EXT_DATA_ENTRY(xtheadfmv, true, PRIV_VERSION_1_11_0, ext_xtheadfmv),
126     ISA_EXT_DATA_ENTRY(xtheadmac, true, PRIV_VERSION_1_11_0, ext_xtheadmac),
127     ISA_EXT_DATA_ENTRY(xtheadmemidx, true, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
128     ISA_EXT_DATA_ENTRY(xtheadmempair, true, PRIV_VERSION_1_11_0, ext_xtheadmempair),
129     ISA_EXT_DATA_ENTRY(xtheadsync, true, PRIV_VERSION_1_11_0, ext_xtheadsync),
130     ISA_EXT_DATA_ENTRY(xventanacondops, true, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
131 };
132 
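/*
 * The two helpers below use ext_enable_offset as a byte offset into
 * cpu->cfg; this relies on every flag referenced from isa_edata_arr[]
 * being a bool member of struct RISCVCPUConfig.
 */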
133 static bool isa_ext_is_enabled(RISCVCPU *cpu,
134                                const struct isa_ext_data *edata)
135 {
136     bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
137 
138     return *ext_enabled;
139 }
140 
141 static void isa_ext_update_enabled(RISCVCPU *cpu,
142                                    const struct isa_ext_data *edata, bool en)
143 {
144     bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
145 
146     *ext_enabled = en;
147 }
148 
149 const char * const riscv_int_regnames[] = {
150   "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
151   "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
152   "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
153   "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
154   "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
155 };
156 
157 const char * const riscv_int_regnamesh[] = {
158   "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
159   "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
160   "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
161   "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
162   "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
163   "x30h/t5h",  "x31h/t6h"
164 };
165 
166 const char * const riscv_fpr_regnames[] = {
167   "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
168   "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
169   "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
170   "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
171   "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
172   "f30/ft10", "f31/ft11"
173 };
174 
175 static const char * const riscv_excp_names[] = {
176     "misaligned_fetch",
177     "fault_fetch",
178     "illegal_instruction",
179     "breakpoint",
180     "misaligned_load",
181     "fault_load",
182     "misaligned_store",
183     "fault_store",
184     "user_ecall",
185     "supervisor_ecall",
186     "hypervisor_ecall",
187     "machine_ecall",
188     "exec_page_fault",
189     "load_page_fault",
190     "reserved",
191     "store_page_fault",
192     "reserved",
193     "reserved",
194     "reserved",
195     "reserved",
196     "guest_exec_page_fault",
197     "guest_load_page_fault",
198     "reserved",
199     "guest_store_page_fault",
200 };
201 
202 static const char * const riscv_intr_names[] = {
203     "u_software",
204     "s_software",
205     "vs_software",
206     "m_software",
207     "u_timer",
208     "s_timer",
209     "vs_timer",
210     "m_timer",
211     "u_external",
212     "s_external",
213     "vs_external",
214     "m_external",
215     "reserved",
216     "reserved",
217     "reserved",
218     "reserved"
219 };
220 
221 static void register_cpu_props(DeviceState *dev);
222 
223 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
224 {
225     if (async) {
226         return (cause < ARRAY_SIZE(riscv_intr_names)) ?
227                riscv_intr_names[cause] : "(unknown)";
228     } else {
229         return (cause < ARRAY_SIZE(riscv_excp_names)) ?
230                riscv_excp_names[cause] : "(unknown)";
231     }
232 }
233 
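/*
 * set_misa() updates both the current values (misa_mxl, misa_ext) and the
 * maximum/mask values (misa_mxl_max, misa_ext_mask), so the per-CPU init
 * functions below can describe a model with a single call.
 */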
234 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
235 {
236     env->misa_mxl_max = env->misa_mxl = mxl;
237     env->misa_ext_mask = env->misa_ext = ext;
238 }
239 
240 static void set_priv_version(CPURISCVState *env, int priv_ver)
241 {
242     env->priv_ver = priv_ver;
243 }
244 
245 static void set_vext_version(CPURISCVState *env, int vext_ver)
246 {
247     env->vext_ver = vext_ver;
248 }
249 
250 static void riscv_any_cpu_init(Object *obj)
251 {
252     CPURISCVState *env = &RISCV_CPU(obj)->env;
253 #if defined(TARGET_RISCV32)
254     set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
255 #elif defined(TARGET_RISCV64)
256     set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
257 #endif
258     set_priv_version(env, PRIV_VERSION_1_12_0);
259     register_cpu_props(DEVICE(obj));
260 }
261 
262 #if defined(TARGET_RISCV64)
263 static void rv64_base_cpu_init(Object *obj)
264 {
265     CPURISCVState *env = &RISCV_CPU(obj)->env;
266     /* We set this during realize, in riscv_cpu_validate_set_extensions() */
267     set_misa(env, MXL_RV64, 0);
268     register_cpu_props(DEVICE(obj));
269     /* Set latest version of privileged specification */
270     set_priv_version(env, PRIV_VERSION_1_12_0);
271 }
272 
273 static void rv64_sifive_u_cpu_init(Object *obj)
274 {
275     CPURISCVState *env = &RISCV_CPU(obj)->env;
276     set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
277     register_cpu_props(DEVICE(obj));
278     set_priv_version(env, PRIV_VERSION_1_10_0);
279 }
280 
281 static void rv64_sifive_e_cpu_init(Object *obj)
282 {
283     CPURISCVState *env = &RISCV_CPU(obj)->env;
284     RISCVCPU *cpu = RISCV_CPU(obj);
285 
286     set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
287     register_cpu_props(DEVICE(obj));
288     set_priv_version(env, PRIV_VERSION_1_10_0);
289     cpu->cfg.mmu = false;
290 }
291 
292 static void rv64_thead_c906_cpu_init(Object *obj)
293 {
294     CPURISCVState *env = &RISCV_CPU(obj)->env;
295     RISCVCPU *cpu = RISCV_CPU(obj);
296 
297     set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
298     set_priv_version(env, PRIV_VERSION_1_11_0);
299 
300     cpu->cfg.ext_g = true;
301     cpu->cfg.ext_c = true;
302     cpu->cfg.ext_u = true;
303     cpu->cfg.ext_s = true;
304     cpu->cfg.ext_icsr = true;
305     cpu->cfg.ext_zfh = true;
306     cpu->cfg.mmu = true;
307     cpu->cfg.ext_xtheadba = true;
308     cpu->cfg.ext_xtheadbb = true;
309     cpu->cfg.ext_xtheadbs = true;
310     cpu->cfg.ext_xtheadcmo = true;
311     cpu->cfg.ext_xtheadcondmov = true;
312     cpu->cfg.ext_xtheadfmemidx = true;
313     cpu->cfg.ext_xtheadmac = true;
314     cpu->cfg.ext_xtheadmemidx = true;
315     cpu->cfg.ext_xtheadmempair = true;
316     cpu->cfg.ext_xtheadsync = true;
317 
318     cpu->cfg.mvendorid = THEAD_VENDOR_ID;
319 }
320 
321 static void rv128_base_cpu_init(Object *obj)
322 {
323     if (qemu_tcg_mttcg_enabled()) {
324         /* Missing 128-bit aligned atomics */
325         error_report("128-bit RISC-V currently does not work with Multi "
326                      "Threaded TCG. Please use: -accel tcg,thread=single");
327         exit(EXIT_FAILURE);
328     }
329     CPURISCVState *env = &RISCV_CPU(obj)->env;
330     /* We set this during realize, in riscv_cpu_validate_set_extensions() */
331     set_misa(env, MXL_RV128, 0);
332     register_cpu_props(DEVICE(obj));
333     /* Set latest version of privileged specification */
334     set_priv_version(env, PRIV_VERSION_1_12_0);
335 }
336 #else
337 static void rv32_base_cpu_init(Object *obj)
338 {
339     CPURISCVState *env = &RISCV_CPU(obj)->env;
340     /* We set this during realize, in riscv_cpu_validate_set_extensions() */
341     set_misa(env, MXL_RV32, 0);
342     register_cpu_props(DEVICE(obj));
343     /* Set latest version of privileged specification */
344     set_priv_version(env, PRIV_VERSION_1_12_0);
345 }
346 
347 static void rv32_sifive_u_cpu_init(Object *obj)
348 {
349     CPURISCVState *env = &RISCV_CPU(obj)->env;
350     set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
351     register_cpu_props(DEVICE(obj));
352     set_priv_version(env, PRIV_VERSION_1_10_0);
353 }
354 
355 static void rv32_sifive_e_cpu_init(Object *obj)
356 {
357     CPURISCVState *env = &RISCV_CPU(obj)->env;
358     RISCVCPU *cpu = RISCV_CPU(obj);
359 
360     set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
361     register_cpu_props(DEVICE(obj));
362     set_priv_version(env, PRIV_VERSION_1_10_0);
363     cpu->cfg.mmu = false;
364 }
365 
366 static void rv32_ibex_cpu_init(Object *obj)
367 {
368     CPURISCVState *env = &RISCV_CPU(obj)->env;
369     RISCVCPU *cpu = RISCV_CPU(obj);
370 
371     set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
372     register_cpu_props(DEVICE(obj));
373     set_priv_version(env, PRIV_VERSION_1_11_0);
374     cpu->cfg.mmu = false;
375     cpu->cfg.epmp = true;
376 }
377 
378 static void rv32_imafcu_nommu_cpu_init(Object *obj)
379 {
380     CPURISCVState *env = &RISCV_CPU(obj)->env;
381     RISCVCPU *cpu = RISCV_CPU(obj);
382 
383     set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
384     register_cpu_props(DEVICE(obj));
385     set_priv_version(env, PRIV_VERSION_1_10_0);
386     cpu->cfg.mmu = false;
387 }
388 #endif
389 
390 #if defined(CONFIG_KVM)
391 static void riscv_host_cpu_init(Object *obj)
392 {
393     CPURISCVState *env = &RISCV_CPU(obj)->env;
394 #if defined(TARGET_RISCV32)
395     set_misa(env, MXL_RV32, 0);
396 #elif defined(TARGET_RISCV64)
397     set_misa(env, MXL_RV64, 0);
398 #endif
399     register_cpu_props(DEVICE(obj));
400 }
401 #endif
402 
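/*
 * Illustrative example (assuming the usual "<name>-riscv-cpu" type naming):
 * riscv_cpu_class_by_name("sifive-u54") looks up the class named
 * "sifive-u54-riscv-cpu" and returns it only if it is a concrete subclass
 * of TYPE_RISCV_CPU.
 */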
403 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
404 {
405     ObjectClass *oc;
406     char *typename;
407     char **cpuname;
408 
409     cpuname = g_strsplit(cpu_model, ",", 1);
410     typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
411     oc = object_class_by_name(typename);
412     g_strfreev(cpuname);
413     g_free(typename);
414     if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
415         object_class_is_abstract(oc)) {
416         return NULL;
417     }
418     return oc;
419 }
420 
421 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
422 {
423     RISCVCPU *cpu = RISCV_CPU(cs);
424     CPURISCVState *env = &cpu->env;
425     int i;
426 
427 #if !defined(CONFIG_USER_ONLY)
428     if (riscv_has_ext(env, RVH)) {
429         qemu_fprintf(f, " %s %d\n", "V      =  ", riscv_cpu_virt_enabled(env));
430     }
431 #endif
432     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
433 #ifndef CONFIG_USER_ONLY
434     {
435         static const int dump_csrs[] = {
436             CSR_MHARTID,
437             CSR_MSTATUS,
438             CSR_MSTATUSH,
439             /*
440              * CSR_SSTATUS is intentionally omitted here as its value
441              * can be figured out by looking at CSR_MSTATUS
442              */
443             CSR_HSTATUS,
444             CSR_VSSTATUS,
445             CSR_MIP,
446             CSR_MIE,
447             CSR_MIDELEG,
448             CSR_HIDELEG,
449             CSR_MEDELEG,
450             CSR_HEDELEG,
451             CSR_MTVEC,
452             CSR_STVEC,
453             CSR_VSTVEC,
454             CSR_MEPC,
455             CSR_SEPC,
456             CSR_VSEPC,
457             CSR_MCAUSE,
458             CSR_SCAUSE,
459             CSR_VSCAUSE,
460             CSR_MTVAL,
461             CSR_STVAL,
462             CSR_HTVAL,
463             CSR_MTVAL2,
464             CSR_MSCRATCH,
465             CSR_SSCRATCH,
466             CSR_SATP,
467             CSR_MMTE,
468             CSR_UPMBASE,
469             CSR_UPMMASK,
470             CSR_SPMBASE,
471             CSR_SPMMASK,
472             CSR_MPMBASE,
473             CSR_MPMMASK,
474         };
475 
476         for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
477             int csrno = dump_csrs[i];
478             target_ulong val = 0;
479             RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
480 
481             /*
482              * Rely on the smode, hmode, etc, predicates within csr.c
483              * to do the filtering of the registers that are present.
484              */
485             if (res == RISCV_EXCP_NONE) {
486                 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
487                              csr_ops[csrno].name, val);
488             }
489         }
490     }
491 #endif
492 
493     for (i = 0; i < 32; i++) {
494         qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
495                      riscv_int_regnames[i], env->gpr[i]);
496         if ((i & 3) == 3) {
497             qemu_fprintf(f, "\n");
498         }
499     }
500     if (flags & CPU_DUMP_FPU) {
501         for (i = 0; i < 32; i++) {
502             qemu_fprintf(f, " %-8s %016" PRIx64,
503                          riscv_fpr_regnames[i], env->fpr[i]);
504             if ((i & 3) == 3) {
505                 qemu_fprintf(f, "\n");
506             }
507         }
508     }
509 }
510 
511 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
512 {
513     RISCVCPU *cpu = RISCV_CPU(cs);
514     CPURISCVState *env = &cpu->env;
515 
516     if (env->xl == MXL_RV32) {
517         env->pc = (int32_t)value;
518     } else {
519         env->pc = value;
520     }
521 }
522 
523 static vaddr riscv_cpu_get_pc(CPUState *cs)
524 {
525     RISCVCPU *cpu = RISCV_CPU(cs);
526     CPURISCVState *env = &cpu->env;
527 
528     /* Match cpu_get_tb_cpu_state. */
529     if (env->xl == MXL_RV32) {
530         return env->pc & UINT32_MAX;
531     }
532     return env->pc;
533 }
534 
535 static void riscv_cpu_synchronize_from_tb(CPUState *cs,
536                                           const TranslationBlock *tb)
537 {
538     RISCVCPU *cpu = RISCV_CPU(cs);
539     CPURISCVState *env = &cpu->env;
540     RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
541 
542     tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
543 
544     if (xl == MXL_RV32) {
545         env->pc = (int32_t) tb->pc;
546     } else {
547         env->pc = tb->pc;
548     }
549 }
550 
551 static bool riscv_cpu_has_work(CPUState *cs)
552 {
553 #ifndef CONFIG_USER_ONLY
554     RISCVCPU *cpu = RISCV_CPU(cs);
555     CPURISCVState *env = &cpu->env;
556     /*
557      * Definition of the WFI instruction requires it to ignore the privilege
558      * mode and delegation registers, but respect individual enables
559      */
560     return riscv_cpu_all_pending(env) != 0;
561 #else
562     return true;
563 #endif
564 }
565 
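/*
 * For restore_state_to_opc, data[0] is the PC recorded at translation time
 * and data[1] is the instruction word restored into env->bins (presumably
 * so it can be reported via xtval later; see the users of env->bins).
 */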
566 static void riscv_restore_state_to_opc(CPUState *cs,
567                                        const TranslationBlock *tb,
568                                        const uint64_t *data)
569 {
570     RISCVCPU *cpu = RISCV_CPU(cs);
571     CPURISCVState *env = &cpu->env;
572     RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
573 
574     if (xl == MXL_RV32) {
575         env->pc = (int32_t)data[0];
576     } else {
577         env->pc = data[0];
578     }
579     env->bins = data[1];
580 }
581 
582 static void riscv_cpu_reset_hold(Object *obj)
583 {
584 #ifndef CONFIG_USER_ONLY
585     uint8_t iprio;
586     int i, irq, rdzero;
587 #endif
588     CPUState *cs = CPU(obj);
589     RISCVCPU *cpu = RISCV_CPU(cs);
590     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
591     CPURISCVState *env = &cpu->env;
592 
593     if (mcc->parent_phases.hold) {
594         mcc->parent_phases.hold(obj);
595     }
596 #ifndef CONFIG_USER_ONLY
597     env->misa_mxl = env->misa_mxl_max;
598     env->priv = PRV_M;
599     env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
600     if (env->misa_mxl > MXL_RV32) {
601         /*
602          * The reset status of SXL/UXL is undefined, but mstatus is WARL
603          * and we must ensure that the value after init is valid for read.
604          */
605         env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
606         env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
607         if (riscv_has_ext(env, RVH)) {
608             env->vsstatus = set_field(env->vsstatus,
609                                       MSTATUS64_SXL, env->misa_mxl);
610             env->vsstatus = set_field(env->vsstatus,
611                                       MSTATUS64_UXL, env->misa_mxl);
612             env->mstatus_hs = set_field(env->mstatus_hs,
613                                         MSTATUS64_SXL, env->misa_mxl);
614             env->mstatus_hs = set_field(env->mstatus_hs,
615                                         MSTATUS64_UXL, env->misa_mxl);
616         }
617     }
618     env->mcause = 0;
619     env->miclaim = MIP_SGEIP;
620     env->pc = env->resetvec;
621     env->bins = 0;
622     env->two_stage_lookup = false;
623 
624     env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
625                    (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
626     env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
627                    (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);
628 
629     /* Initialize default priorities of local interrupts. */
630     for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
631         iprio = riscv_cpu_default_priority(i);
632         env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
633         env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
634         env->hviprio[i] = 0;
635     }
636     i = 0;
637     while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
638         if (!rdzero) {
639             env->hviprio[irq] = env->miprio[irq];
640         }
641         i++;
642     }
643     /* mmte is supposed to have pm.current hardwired to 1 */
644     env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT);
645 #endif
646     env->xl = riscv_cpu_mxl(env);
647     riscv_cpu_update_mask(env);
648     cs->exception_index = RISCV_EXCP_NONE;
649     env->load_res = -1;
650     set_default_nan_mode(1, &env->fp_status);
651 
652 #ifndef CONFIG_USER_ONLY
653     if (cpu->cfg.debug) {
654         riscv_trigger_init(env);
655     }
656 
657     if (kvm_enabled()) {
658         kvm_riscv_reset_vcpu(cpu);
659     }
660 #endif
661 }
662 
663 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
664 {
665     RISCVCPU *cpu = RISCV_CPU(s);
666 
667     switch (riscv_cpu_mxl(&cpu->env)) {
668     case MXL_RV32:
669         info->print_insn = print_insn_riscv32;
670         break;
671     case MXL_RV64:
672         info->print_insn = print_insn_riscv64;
673         break;
674     case MXL_RV128:
675         info->print_insn = print_insn_riscv128;
676         break;
677     default:
678         g_assert_not_reached();
679     }
680 }
681 
682 /*
683  * Check consistency between the chosen extensions, updating
684  * cpu->cfg accordingly, and call set_misa() at the end.
685  */
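/*
 * Illustrative example: with "-cpu rv64,zk=on" this function also enables
 * Zkn, Zkr and Zkt and, via the Zkn block below, Zbkb/Zbkc/Zbkx and
 * Zkne/Zknd/Zknh, before folding the single-letter extensions into
 * env->misa_ext through set_misa().
 */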
686 static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
687 {
688     CPURISCVState *env = &cpu->env;
689     uint32_t ext = 0;
690 
691     /* Do some ISA extension error checking */
692     if (cpu->cfg.ext_g && !(cpu->cfg.ext_i && cpu->cfg.ext_m &&
693                             cpu->cfg.ext_a && cpu->cfg.ext_f &&
694                             cpu->cfg.ext_d &&
695                             cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
696         warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
697         cpu->cfg.ext_i = true;
698         cpu->cfg.ext_m = true;
699         cpu->cfg.ext_a = true;
700         cpu->cfg.ext_f = true;
701         cpu->cfg.ext_d = true;
702         cpu->cfg.ext_icsr = true;
703         cpu->cfg.ext_ifencei = true;
704     }
705 
706     if (cpu->cfg.ext_i && cpu->cfg.ext_e) {
707         error_setg(errp,
708                    "I and E extensions are incompatible");
709         return;
710     }
711 
712     if (!cpu->cfg.ext_i && !cpu->cfg.ext_e) {
713         error_setg(errp,
714                    "Either I or E extension must be set");
715         return;
716     }
717 
718     if (cpu->cfg.ext_s && !cpu->cfg.ext_u) {
719         error_setg(errp,
720                    "Setting S extension without U extension is illegal");
721         return;
722     }
723 
724     if (cpu->cfg.ext_h && !cpu->cfg.ext_i) {
725         error_setg(errp,
726                    "H depends on an I base integer ISA with 32 x registers");
727         return;
728     }
729 
730     if (cpu->cfg.ext_h && !cpu->cfg.ext_s) {
731         error_setg(errp, "H extension implicitly requires S-mode");
732         return;
733     }
734 
735     if (cpu->cfg.ext_f && !cpu->cfg.ext_icsr) {
736         error_setg(errp, "F extension requires Zicsr");
737         return;
738     }
739 
740     if ((cpu->cfg.ext_zawrs) && !cpu->cfg.ext_a) {
741         error_setg(errp, "Zawrs extension requires A extension");
742         return;
743     }
744 
745     if (cpu->cfg.ext_zfh) {
746         cpu->cfg.ext_zfhmin = true;
747     }
748 
749     if (cpu->cfg.ext_zfhmin && !cpu->cfg.ext_f) {
750         error_setg(errp, "Zfh/Zfhmin extensions require F extension");
751         return;
752     }
753 
754     if (cpu->cfg.ext_d && !cpu->cfg.ext_f) {
755         error_setg(errp, "D extension requires F extension");
756         return;
757     }
758 
759     /* The V vector extension depends on the Zve64d extension */
760     if (cpu->cfg.ext_v) {
761         cpu->cfg.ext_zve64d = true;
762     }
763 
764     /* The Zve64d extension depends on the Zve64f extension */
765     if (cpu->cfg.ext_zve64d) {
766         cpu->cfg.ext_zve64f = true;
767     }
768 
769     /* The Zve64f extension depends on the Zve32f extension */
770     if (cpu->cfg.ext_zve64f) {
771         cpu->cfg.ext_zve32f = true;
772     }
773 
774     if (cpu->cfg.ext_zve64d && !cpu->cfg.ext_d) {
775         error_setg(errp, "Zve64d/V extensions require D extension");
776         return;
777     }
778 
779     if (cpu->cfg.ext_zve32f && !cpu->cfg.ext_f) {
780         error_setg(errp, "Zve32f/Zve64f extensions require F extension");
781         return;
782     }
783 
784     if (cpu->cfg.ext_zvfh) {
785         cpu->cfg.ext_zvfhmin = true;
786     }
787 
788     if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
789         error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
790         return;
791     }
792 
793     if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
794         error_setg(errp, "Zvfh extension requires Zfhmin extension");
795         return;
796     }
797 
798     /* Set the ISA extensions, checks should have happened above */
799     if (cpu->cfg.ext_zhinx) {
800         cpu->cfg.ext_zhinxmin = true;
801     }
802 
803     if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) {
804         cpu->cfg.ext_zfinx = true;
805     }
806 
807     if (cpu->cfg.ext_zfinx) {
808         if (!cpu->cfg.ext_icsr) {
809             error_setg(errp, "Zfinx extension requires Zicsr");
810             return;
811         }
812         if (cpu->cfg.ext_f) {
813             error_setg(errp,
814                        "Zfinx cannot be supported together with F extension");
815             return;
816         }
817     }
818 
819     if (cpu->cfg.ext_zk) {
820         cpu->cfg.ext_zkn = true;
821         cpu->cfg.ext_zkr = true;
822         cpu->cfg.ext_zkt = true;
823     }
824 
825     if (cpu->cfg.ext_zkn) {
826         cpu->cfg.ext_zbkb = true;
827         cpu->cfg.ext_zbkc = true;
828         cpu->cfg.ext_zbkx = true;
829         cpu->cfg.ext_zkne = true;
830         cpu->cfg.ext_zknd = true;
831         cpu->cfg.ext_zknh = true;
832     }
833 
834     if (cpu->cfg.ext_zks) {
835         cpu->cfg.ext_zbkb = true;
836         cpu->cfg.ext_zbkc = true;
837         cpu->cfg.ext_zbkx = true;
838         cpu->cfg.ext_zksed = true;
839         cpu->cfg.ext_zksh = true;
840     }
841 
842     if (cpu->cfg.ext_i) {
843         ext |= RVI;
844     }
845     if (cpu->cfg.ext_e) {
846         ext |= RVE;
847     }
848     if (cpu->cfg.ext_m) {
849         ext |= RVM;
850     }
851     if (cpu->cfg.ext_a) {
852         ext |= RVA;
853     }
854     if (cpu->cfg.ext_f) {
855         ext |= RVF;
856     }
857     if (cpu->cfg.ext_d) {
858         ext |= RVD;
859     }
860     if (cpu->cfg.ext_c) {
861         ext |= RVC;
862     }
863     if (cpu->cfg.ext_s) {
864         ext |= RVS;
865     }
866     if (cpu->cfg.ext_u) {
867         ext |= RVU;
868     }
869     if (cpu->cfg.ext_h) {
870         ext |= RVH;
871     }
872     if (cpu->cfg.ext_v) {
873         int vext_version = VEXT_VERSION_1_00_0;
874         ext |= RVV;
875         if (!is_power_of_2(cpu->cfg.vlen)) {
876             error_setg(errp,
877                        "Vector extension VLEN must be a power of 2");
878             return;
879         }
880         if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) {
881             error_setg(errp,
882                        "Vector extension implementation only supports VLEN "
883                        "in the range [128, %d]", RV_VLEN_MAX);
884             return;
885         }
886         if (!is_power_of_2(cpu->cfg.elen)) {
887             error_setg(errp,
888                        "Vector extension ELEN must be a power of 2");
889             return;
890         }
891         if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) {
892             error_setg(errp,
893                        "Vector extension implementation only supports ELEN "
894                        "in the range [8, 64]");
895             return;
896         }
897         if (cpu->cfg.vext_spec) {
898             if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) {
899                 vext_version = VEXT_VERSION_1_00_0;
900             } else {
901                 error_setg(errp,
902                            "Unsupported vector spec version '%s'",
903                            cpu->cfg.vext_spec);
904                 return;
905             }
906         } else {
907             qemu_log("vector version is not specified, "
908                      "using the default value v1.0\n");
909         }
910         set_vext_version(env, vext_version);
911     }
912     if (cpu->cfg.ext_j) {
913         ext |= RVJ;
914     }
915 
916     set_misa(env, env->misa_mxl, ext);
917 }
918 
919 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
920 {
921     CPUState *cs = CPU(dev);
922     RISCVCPU *cpu = RISCV_CPU(dev);
923     CPURISCVState *env = &cpu->env;
924     RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
925     CPUClass *cc = CPU_CLASS(mcc);
926     int i, priv_version = -1;
927     Error *local_err = NULL;
928 
929     cpu_exec_realizefn(cs, &local_err);
930     if (local_err != NULL) {
931         error_propagate(errp, local_err);
932         return;
933     }
934 
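    /*
     * Illustrative example: "-cpu rv64,priv_spec=v1.11.0" makes the block
     * below pick PRIV_VERSION_1_11_0; unrecognised strings are rejected
     * with an error.
     */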
935     if (cpu->cfg.priv_spec) {
936         if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
937             priv_version = PRIV_VERSION_1_12_0;
938         } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
939             priv_version = PRIV_VERSION_1_11_0;
940         } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
941             priv_version = PRIV_VERSION_1_10_0;
942         } else {
943             error_setg(errp,
944                        "Unsupported privilege spec version '%s'",
945                        cpu->cfg.priv_spec);
946             return;
947         }
948     }
949 
950     if (priv_version >= PRIV_VERSION_1_10_0) {
951         set_priv_version(env, priv_version);
952     }
953 
954     /* Force disable extensions if priv spec version does not match */
955     for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
956         if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
957             (env->priv_ver < isa_edata_arr[i].min_version)) {
958             isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
959 #ifndef CONFIG_USER_ONLY
960             warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
961                         " because privilege spec version does not match",
962                         isa_edata_arr[i].name, env->mhartid);
963 #else
964             warn_report("disabling %s extension because "
965                         "privilege spec version does not match",
966                         isa_edata_arr[i].name);
967 #endif
968         }
969     }
970 
971     if (cpu->cfg.epmp && !cpu->cfg.pmp) {
972         /*
973          * Enhanced PMP should only be available
974          * on harts with PMP support
975          */
976         error_setg(errp, "Invalid configuration: EPMP requires PMP support");
977         return;
978     }
979 
980 
981 #ifndef CONFIG_USER_ONLY
982     if (cpu->cfg.ext_sstc) {
983         riscv_timer_init(cpu);
984     }
985 #endif /* CONFIG_USER_ONLY */
986 
987     /* Validate that MISA_MXL is set properly. */
988     switch (env->misa_mxl_max) {
989 #ifdef TARGET_RISCV64
990     case MXL_RV64:
991     case MXL_RV128:
992         cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
993         break;
994 #endif
995     case MXL_RV32:
996         cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
997         break;
998     default:
999         g_assert_not_reached();
1000     }
1001     assert(env->misa_mxl_max == env->misa_mxl);
1002 
1003     riscv_cpu_validate_set_extensions(cpu, &local_err);
1004     if (local_err != NULL) {
1005         error_propagate(errp, local_err);
1006         return;
1007     }
1008 
1009 #ifndef CONFIG_USER_ONLY
1010     if (cpu->cfg.pmu_num) {
1011         if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
1012             cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1013                                           riscv_pmu_timer_cb, cpu);
1014         }
1015     }
1016 #endif
1017 
1018     riscv_cpu_register_gdb_regs_for_features(cs);
1019 
1020     qemu_init_vcpu(cs);
1021     cpu_reset(cs);
1022 
1023     mcc->parent_realize(dev, errp);
1024 }
1025 
1026 #ifndef CONFIG_USER_ONLY
1027 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1028 {
1029     RISCVCPU *cpu = RISCV_CPU(opaque);
1030     CPURISCVState *env = &cpu->env;
1031 
1032     if (irq < IRQ_LOCAL_MAX) {
1033         switch (irq) {
1034         case IRQ_U_SOFT:
1035         case IRQ_S_SOFT:
1036         case IRQ_VS_SOFT:
1037         case IRQ_M_SOFT:
1038         case IRQ_U_TIMER:
1039         case IRQ_S_TIMER:
1040         case IRQ_VS_TIMER:
1041         case IRQ_M_TIMER:
1042         case IRQ_U_EXT:
1043         case IRQ_VS_EXT:
1044         case IRQ_M_EXT:
1045             if (kvm_enabled()) {
1046                 kvm_riscv_set_irq(cpu, irq, level);
1047             } else {
1048                 riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level));
1049             }
1050             break;
1051         case IRQ_S_EXT:
1052             if (kvm_enabled()) {
1053                 kvm_riscv_set_irq(cpu, irq, level);
1054             } else {
1055                 env->external_seip = level;
1056                 riscv_cpu_update_mip(cpu, 1 << irq,
1057                                      BOOL_TO_MASK(level | env->software_seip));
1058             }
1059             break;
1060         default:
1061             g_assert_not_reached();
1062         }
1063     } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1064         /* Require H-extension for handling guest local interrupts */
1065         if (!riscv_has_ext(env, RVH)) {
1066             g_assert_not_reached();
1067         }
1068 
1069         /* Compute bit position in HGEIP CSR */
1070         irq = irq - IRQ_LOCAL_MAX + 1;
1071         if (env->geilen < irq) {
1072             g_assert_not_reached();
1073         }
1074 
1075         /* Update HGEIP CSR */
1076         env->hgeip &= ~((target_ulong)1 << irq);
1077         if (level) {
1078             env->hgeip |= (target_ulong)1 << irq;
1079         }
1080 
1081         /* Update mip.SGEIP bit */
1082         riscv_cpu_update_mip(cpu, MIP_SGEIP,
1083                              BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1084     } else {
1085         g_assert_not_reached();
1086     }
1087 }
1088 #endif /* CONFIG_USER_ONLY */
1089 
1090 static void riscv_cpu_init(Object *obj)
1091 {
1092     RISCVCPU *cpu = RISCV_CPU(obj);
1093 
1094     cpu->cfg.ext_ifencei = true;
1095     cpu->cfg.ext_icsr = true;
1096     cpu->cfg.mmu = true;
1097     cpu->cfg.pmp = true;
1098 
1099     cpu_set_cpustate_pointers(cpu);
1100 
1101 #ifndef CONFIG_USER_ONLY
1102     qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
1103                       IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1104 #endif /* CONFIG_USER_ONLY */
1105 }
1106 
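/*
 * Once registered by register_cpu_props() below, these become user-visible
 * CPU options; e.g. (illustrative)
 *     -cpu rv64,v=true,vlen=256,elen=64,Zfh=true
 * enables the vector and Zfh extensions with a 256-bit VLEN on the generic
 * rv64 model.
 */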
1107 static Property riscv_cpu_extensions[] = {
1108     /* Defaults for standard extensions */
1109     DEFINE_PROP_BOOL("i", RISCVCPU, cfg.ext_i, true),
1110     DEFINE_PROP_BOOL("e", RISCVCPU, cfg.ext_e, false),
1111     DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, false),
1112     DEFINE_PROP_BOOL("m", RISCVCPU, cfg.ext_m, true),
1113     DEFINE_PROP_BOOL("a", RISCVCPU, cfg.ext_a, true),
1114     DEFINE_PROP_BOOL("f", RISCVCPU, cfg.ext_f, true),
1115     DEFINE_PROP_BOOL("d", RISCVCPU, cfg.ext_d, true),
1116     DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true),
1117     DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true),
1118     DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
1119     DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false),
1120     DEFINE_PROP_BOOL("h", RISCVCPU, cfg.ext_h, true),
1121     DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
1122     DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
1123     DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
1124     DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
1125     DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
1126     DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
1127     DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
1128     DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
1129     DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
1130     DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
1131     DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
1132     DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
1133     DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
1134     DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),
1135 
1136     DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
1137     DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
1138     DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
1139     DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
1140 
1141     DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
1142 
1143     DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
1144     DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
1145     DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),
1146 
1147     DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
1148     DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
1149     DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
1150     DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
1151     DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
1152     DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
1153     DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
1154     DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
1155     DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
1156     DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
1157     DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
1158     DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
1159     DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
1160     DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
1161     DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
1162     DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
1163     DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),
1164 
1165     DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
1166     DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
1167     DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
1168     DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),
1169 
1170     DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),
1171 
1172     /* Vendor-specific custom extensions */
1173     DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
1174     DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
1175     DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
1176     DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
1177     DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
1178     DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
1179     DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
1180     DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
1181     DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
1182     DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
1183     DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
1184     DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
1185 
1186     /* These are experimental so mark with 'x-' */
1187     DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),
1188     DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
1189     /* ePMP 0.9.3 */
1190     DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
1191     DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
1192     DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),
1193 
1194     DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
1195     DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),
1196 
1197     DEFINE_PROP_END_OF_LIST(),
1198 };
1199 
1200 /*
1201  * Register CPU props based on env.misa_ext. If a non-zero
1202  * value was set, only the corresponding cpu->cfg.ext_* flags are
1203  * set and no properties are registered. env.misa_ext = 0 means
1204  * that we want all the default properties to be registered.
1205  */
1206 static void register_cpu_props(DeviceState *dev)
1207 {
1208     RISCVCPU *cpu = RISCV_CPU(OBJECT(dev));
1209     uint32_t misa_ext = cpu->env.misa_ext;
1210     Property *prop;
1211 
1212     /*
1213      * If misa_ext is not zero, set cfg properties now to
1214      * allow them to be read during riscv_cpu_realize()
1215      * later on.
1216      */
1217     if (cpu->env.misa_ext != 0) {
1218         cpu->cfg.ext_i = misa_ext & RVI;
1219         cpu->cfg.ext_e = misa_ext & RVE;
1220         cpu->cfg.ext_m = misa_ext & RVM;
1221         cpu->cfg.ext_a = misa_ext & RVA;
1222         cpu->cfg.ext_f = misa_ext & RVF;
1223         cpu->cfg.ext_d = misa_ext & RVD;
1224         cpu->cfg.ext_v = misa_ext & RVV;
1225         cpu->cfg.ext_c = misa_ext & RVC;
1226         cpu->cfg.ext_s = misa_ext & RVS;
1227         cpu->cfg.ext_u = misa_ext & RVU;
1228         cpu->cfg.ext_h = misa_ext & RVH;
1229         cpu->cfg.ext_j = misa_ext & RVJ;
1230 
1231         /*
1232          * We don't want to set the default riscv_cpu_extensions
1233          * in this case.
1234          */
1235         return;
1236     }
1237 
1238     for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
1239         qdev_property_add_static(dev, prop);
1240     }
1241 }
1242 
1243 static Property riscv_cpu_properties[] = {
1244     DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
1245 
1246     DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0),
1247     DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID),
1248     DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID),
1249 
1250 #ifndef CONFIG_USER_ONLY
1251     DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
1252 #endif
1253 
1254     DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
1255 
1256     DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
1257     DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
1258 
1259     /*
1260      * write_misa() is marked as experimental for now so mark
1261      * it with 'x-' and default it to 'false'.
1262      */
1263     DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
1264     DEFINE_PROP_END_OF_LIST(),
1265 };
1266 
1267 static gchar *riscv_gdb_arch_name(CPUState *cs)
1268 {
1269     RISCVCPU *cpu = RISCV_CPU(cs);
1270     CPURISCVState *env = &cpu->env;
1271 
1272     switch (riscv_cpu_mxl(env)) {
1273     case MXL_RV32:
1274         return g_strdup("riscv:rv32");
1275     case MXL_RV64:
1276     case MXL_RV128:
1277         return g_strdup("riscv:rv64");
1278     default:
1279         g_assert_not_reached();
1280     }
1281 }
1282 
1283 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1284 {
1285     RISCVCPU *cpu = RISCV_CPU(cs);
1286 
1287     if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1288         return cpu->dyn_csr_xml;
1289     } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1290         return cpu->dyn_vreg_xml;
1291     }
1292 
1293     return NULL;
1294 }
1295 
1296 #ifndef CONFIG_USER_ONLY
1297 #include "hw/core/sysemu-cpu-ops.h"
1298 
1299 static const struct SysemuCPUOps riscv_sysemu_ops = {
1300     .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
1301     .write_elf64_note = riscv_cpu_write_elf64_note,
1302     .write_elf32_note = riscv_cpu_write_elf32_note,
1303     .legacy_vmsd = &vmstate_riscv_cpu,
1304 };
1305 #endif
1306 
1307 #include "hw/core/tcg-cpu-ops.h"
1308 
1309 static const struct TCGCPUOps riscv_tcg_ops = {
1310     .initialize = riscv_translate_init,
1311     .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
1312     .restore_state_to_opc = riscv_restore_state_to_opc,
1313 
1314 #ifndef CONFIG_USER_ONLY
1315     .tlb_fill = riscv_cpu_tlb_fill,
1316     .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
1317     .do_interrupt = riscv_cpu_do_interrupt,
1318     .do_transaction_failed = riscv_cpu_do_transaction_failed,
1319     .do_unaligned_access = riscv_cpu_do_unaligned_access,
1320     .debug_excp_handler = riscv_cpu_debug_excp_handler,
1321     .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
1322     .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
1323 #endif /* !CONFIG_USER_ONLY */
1324 };
1325 
1326 static void riscv_cpu_class_init(ObjectClass *c, void *data)
1327 {
1328     RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
1329     CPUClass *cc = CPU_CLASS(c);
1330     DeviceClass *dc = DEVICE_CLASS(c);
1331     ResettableClass *rc = RESETTABLE_CLASS(c);
1332 
1333     device_class_set_parent_realize(dc, riscv_cpu_realize,
1334                                     &mcc->parent_realize);
1335 
1336     resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
1337                                        &mcc->parent_phases);
1338 
1339     cc->class_by_name = riscv_cpu_class_by_name;
1340     cc->has_work = riscv_cpu_has_work;
1341     cc->dump_state = riscv_cpu_dump_state;
1342     cc->set_pc = riscv_cpu_set_pc;
1343     cc->get_pc = riscv_cpu_get_pc;
1344     cc->gdb_read_register = riscv_cpu_gdb_read_register;
1345     cc->gdb_write_register = riscv_cpu_gdb_write_register;
1346     cc->gdb_num_core_regs = 33;
1347     cc->gdb_stop_before_watchpoint = true;
1348     cc->disas_set_info = riscv_cpu_disas_set_info;
1349 #ifndef CONFIG_USER_ONLY
1350     cc->sysemu_ops = &riscv_sysemu_ops;
1351 #endif
1352     cc->gdb_arch_name = riscv_gdb_arch_name;
1353     cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
1354     cc->tcg_ops = &riscv_tcg_ops;
1355 
1356     device_class_set_props(dc, riscv_cpu_properties);
1357 }
1358 
1359 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, int max_str_len)
1360 {
1361     char *old = *isa_str;
1362     char *new = *isa_str;
1363     int i;
1364 
1365     for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
1366         if (isa_edata_arr[i].multi_letter &&
1367             isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
1368             new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
1369             g_free(old);
1370             old = new;
1371         }
1372     }
1373 
1374     *isa_str = new;
1375 }
1376 
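/*
 * Illustrative output: an RV64 CPU with IMAFDC set in misa and with Zicsr
 * and Zifencei enabled would produce something like
 * "rv64imafdc_zicsr_zifencei"; with short-isa-string the multi-letter part
 * is omitted, leaving "rv64imafdc".
 */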
1377 char *riscv_isa_string(RISCVCPU *cpu)
1378 {
1379     int i;
1380     const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
1381     char *isa_str = g_new(char, maxlen);
1382     char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
1383     for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
1384         if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
1385             *p++ = qemu_tolower(riscv_single_letter_exts[i]);
1386         }
1387     }
1388     *p = '\0';
1389     if (!cpu->cfg.short_isa_string) {
1390         riscv_isa_string_ext(cpu, &isa_str, maxlen);
1391     }
1392     return isa_str;
1393 }
1394 
1395 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
1396 {
1397     ObjectClass *class_a = (ObjectClass *)a;
1398     ObjectClass *class_b = (ObjectClass *)b;
1399     const char *name_a, *name_b;
1400 
1401     name_a = object_class_get_name(class_a);
1402     name_b = object_class_get_name(class_b);
1403     return strcmp(name_a, name_b);
1404 }
1405 
1406 static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
1407 {
1408     const char *typename = object_class_get_name(OBJECT_CLASS(data));
1409     int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);
1410 
1411     qemu_printf("%.*s\n", len, typename);
1412 }
1413 
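/*
 * riscv_cpu_list() prints each registered CPU type with the
 * RISCV_CPU_TYPE_SUFFIX stripped (this is what "-cpu help" shows); assuming
 * the usual "<name>-riscv-cpu" type names, entries look like "rv64" or
 * "sifive-u54".
 */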
1414 void riscv_cpu_list(void)
1415 {
1416     GSList *list;
1417 
1418     list = object_class_get_list(TYPE_RISCV_CPU, false);
1419     list = g_slist_sort(list, riscv_cpu_list_compare);
1420     g_slist_foreach(list, riscv_cpu_list_entry, NULL);
1421     g_slist_free(list);
1422 }
1423 
1424 #define DEFINE_CPU(type_name, initfn)      \
1425     {                                      \
1426         .name = type_name,                 \
1427         .parent = TYPE_RISCV_CPU,          \
1428         .instance_init = initfn            \
1429     }
1430 
1431 static const TypeInfo riscv_cpu_type_infos[] = {
1432     {
1433         .name = TYPE_RISCV_CPU,
1434         .parent = TYPE_CPU,
1435         .instance_size = sizeof(RISCVCPU),
1436         .instance_align = __alignof__(RISCVCPU),
1437         .instance_init = riscv_cpu_init,
1438         .abstract = true,
1439         .class_size = sizeof(RISCVCPUClass),
1440         .class_init = riscv_cpu_class_init,
1441     },
1442     DEFINE_CPU(TYPE_RISCV_CPU_ANY,              riscv_any_cpu_init),
1443 #if defined(CONFIG_KVM)
1444     DEFINE_CPU(TYPE_RISCV_CPU_HOST,             riscv_host_cpu_init),
1445 #endif
1446 #if defined(TARGET_RISCV32)
1447     DEFINE_CPU(TYPE_RISCV_CPU_BASE32,           rv32_base_cpu_init),
1448     DEFINE_CPU(TYPE_RISCV_CPU_IBEX,             rv32_ibex_cpu_init),
1449     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31,       rv32_sifive_e_cpu_init),
1450     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34,       rv32_imafcu_nommu_cpu_init),
1451     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34,       rv32_sifive_u_cpu_init),
1452 #elif defined(TARGET_RISCV64)
1453     DEFINE_CPU(TYPE_RISCV_CPU_BASE64,           rv64_base_cpu_init),
1454     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51,       rv64_sifive_e_cpu_init),
1455     DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54,       rv64_sifive_u_cpu_init),
1456     DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C,         rv64_sifive_u_cpu_init),
1457     DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906,       rv64_thead_c906_cpu_init),
1458     DEFINE_CPU(TYPE_RISCV_CPU_BASE128,          rv128_base_cpu_init),
1459 #endif
1460 };
1461 
1462 DEFINE_TYPES(riscv_cpu_type_infos)
1463