1 /*
2 * QEMU RISC-V CPU
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "internals.h"
27 #include "exec/exec-all.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "hw/core/qdev-prop-internal.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/device_tree.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm/kvm_riscv.h"
39 #include "tcg/tcg-cpu.h"
40 #include "tcg/tcg.h"
41
42 /* RISC-V CPU definitions */
/* Canonical ordering of the single-letter extensions in an ISA string */
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
/* Every misa bit QEMU knows about; zero-terminated for iteration */
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
                              RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
46
47 /*
48 * From vector_helper.c
49 * Note that vector data is stored in host-endian 64-bit chunks,
50 * so addressing bytes needs a host-endian fixup.
51 */
#if HOST_BIG_ENDIAN
#define BYTE(x) ((x) ^ 7) /* flip the low 3 bits to index inside a 64-bit chunk */
#else
#define BYTE(x) (x)
#endif
57
riscv_cpu_is_32bit(RISCVCPU * cpu)58 bool riscv_cpu_is_32bit(RISCVCPU *cpu)
59 {
60 return riscv_cpu_mxl(&cpu->env) == MXL_RV32;
61 }
62
63 /* Hash that stores general user set numeric options */
64 static GHashTable *general_user_opts;
65
/*
 * Remember that the user explicitly set a numeric CPU option, so later
 * validation can distinguish user choices from built-in defaults.
 */
static void cpu_option_add_user_setting(const char *optname, uint32_t value)
{
    gpointer key = (gpointer)optname;

    g_hash_table_insert(general_user_opts, key, GUINT_TO_POINTER(value));
}
71
riscv_cpu_option_set(const char * optname)72 bool riscv_cpu_option_set(const char *optname)
73 {
74 return g_hash_table_contains(general_user_opts, optname);
75 }
76
/* Build one isa_edata_arr element: name string, minimum priv spec, cfg offset */
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, CPU_CFG_OFFSET(_prop)}
79
80 /*
81 * Here are the ordering rules of extension naming defined by RISC-V
82 * specification :
83 * 1. All extensions should be separated from other multi-letter extensions
84 * by an underscore.
85 * 2. The first letter following the 'Z' conventionally indicates the most
86 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
87 * If multiple 'Z' extensions are named, they should be ordered first
88 * by category, then alphabetically within a category.
89 * 3. Standard supervisor-level extensions (starts with 'S') should be
90 * listed after standard unprivileged extensions. If multiple
91 * supervisor-level extensions are listed, they should be ordered
92 * alphabetically.
93 * 4. Non-standard extensions (starts with 'X') must be listed after all
94 * standard extensions. They must be separated from other multi-letter
95 * extensions by an underscore.
96 *
97 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
98 * instead.
99 */
100 const RISCVIsaExtData isa_edata_arr[] = {
101 ISA_EXT_DATA_ENTRY(zic64b, PRIV_VERSION_1_12_0, ext_zic64b),
102 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom),
103 ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop),
104 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz),
105 ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
106 ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
107 ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
108 ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
109 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
110 ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr),
111 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_zicsr),
112 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_zifencei),
113 ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
114 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
115 ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm),
116 ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop),
117 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
118 ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
119 ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
120 ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha),
121 ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
122 ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
123 ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
124 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
125 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
126 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
127 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
128 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
129 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
130 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
131 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
132 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
133 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
134 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
135 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
136 ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop),
137 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
138 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
139 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
140 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
141 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
142 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
143 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
144 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
145 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
146 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
147 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
148 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
149 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
150 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
151 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
152 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
153 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
154 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
155 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
156 ISA_EXT_DATA_ENTRY(ztso, PRIV_VERSION_1_12_0, ext_ztso),
157 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
158 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
159 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
160 ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
161 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
162 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
163 ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
164 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
165 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
166 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
167 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
168 ISA_EXT_DATA_ENTRY(zvkb, PRIV_VERSION_1_12_0, ext_zvkb),
169 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
170 ISA_EXT_DATA_ENTRY(zvkn, PRIV_VERSION_1_12_0, ext_zvkn),
171 ISA_EXT_DATA_ENTRY(zvknc, PRIV_VERSION_1_12_0, ext_zvknc),
172 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
173 ISA_EXT_DATA_ENTRY(zvkng, PRIV_VERSION_1_12_0, ext_zvkng),
174 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
175 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
176 ISA_EXT_DATA_ENTRY(zvks, PRIV_VERSION_1_12_0, ext_zvks),
177 ISA_EXT_DATA_ENTRY(zvksc, PRIV_VERSION_1_12_0, ext_zvksc),
178 ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
179 ISA_EXT_DATA_ENTRY(zvksg, PRIV_VERSION_1_12_0, ext_zvksg),
180 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
181 ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
182 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
183 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
184 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
185 ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
186 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
187 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
188 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
189 ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
190 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
191 ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
192 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
193 ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
194 ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
195 ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
196 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
197 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
198 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
199 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
200 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
201 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
202 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
203 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
204 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
205 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
206 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
207 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
208 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
209 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
210 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
211 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
212
213 DEFINE_PROP_END_OF_LIST(),
214 };
215
isa_ext_is_enabled(RISCVCPU * cpu,uint32_t ext_offset)216 bool isa_ext_is_enabled(RISCVCPU *cpu, uint32_t ext_offset)
217 {
218 bool *ext_enabled = (void *)&cpu->cfg + ext_offset;
219
220 return *ext_enabled;
221 }
222
/* Write @en into the bool extension flag stored at @ext_offset in cpu->cfg. */
void isa_ext_update_enabled(RISCVCPU *cpu, uint32_t ext_offset, bool en)
{
    bool *flag = (bool *)((char *)&cpu->cfg + ext_offset);

    *flag = en;
}
229
riscv_cpu_is_vendor(Object * cpu_obj)230 bool riscv_cpu_is_vendor(Object *cpu_obj)
231 {
232 return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
233 }
234
/* Integer register names, "xN/ABI-name", indexed by register number */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};
242
/* Names for the upper halves of the integer registers (RV128 support) */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};
251
/* Floating-point register names, "fN/ABI-name", indexed by register number */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};
260
/* Vector register names, indexed by register number */
const char * const riscv_rvv_regnames[] = {
    "v0", "v1", "v2", "v3", "v4", "v5", "v6",
    "v7", "v8", "v9", "v10", "v11", "v12", "v13",
    "v14", "v15", "v16", "v17", "v18", "v19", "v20",
    "v21", "v22", "v23", "v24", "v25", "v26", "v27",
    "v28", "v29", "v30", "v31"
};
268
/* Human-readable exception names, indexed by RISCV_EXCP_* cause number */
static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};
295
/* Human-readable interrupt names, indexed by IRQ_* interrupt cause number */
static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};
314
/*
 * Map a trap @cause to its human-readable name; @async selects between
 * interrupt and exception name tables. Out-of-range causes yield "(unknown)".
 */
const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    const char * const *names;
    size_t count;

    if (async) {
        names = riscv_intr_names;
        count = ARRAY_SIZE(riscv_intr_names);
    } else {
        names = riscv_excp_names;
        count = ARRAY_SIZE(riscv_excp_names);
    }

    return cause < count ? names[cause] : "(unknown)";
}
325
/* Set both the current misa extensions and their writable mask to @ext. */
void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext)
{
    env->misa_ext = ext;
    env->misa_ext_mask = ext;
}
330
/* Translate the class's maximum MXL encoding into an XLEN in bits. */
int riscv_cpu_max_xlen(RISCVCPUClass *mcc)
{
    int xlen = 16 << mcc->misa_mxl_max;

    return xlen;
}
335
336 #ifndef CONFIG_USER_ONLY
satp_mode_from_str(const char * satp_mode_str)337 static uint8_t satp_mode_from_str(const char *satp_mode_str)
338 {
339 if (!strncmp(satp_mode_str, "mbare", 5)) {
340 return VM_1_10_MBARE;
341 }
342
343 if (!strncmp(satp_mode_str, "sv32", 4)) {
344 return VM_1_10_SV32;
345 }
346
347 if (!strncmp(satp_mode_str, "sv39", 4)) {
348 return VM_1_10_SV39;
349 }
350
351 if (!strncmp(satp_mode_str, "sv48", 4)) {
352 return VM_1_10_SV48;
353 }
354
355 if (!strncmp(satp_mode_str, "sv57", 4)) {
356 return VM_1_10_SV57;
357 }
358
359 if (!strncmp(satp_mode_str, "sv64", 4)) {
360 return VM_1_10_SV64;
361 }
362
363 g_assert_not_reached();
364 }
365
satp_mode_max_from_map(uint32_t map)366 uint8_t satp_mode_max_from_map(uint32_t map)
367 {
368 /*
369 * 'map = 0' will make us return (31 - 32), which C will
370 * happily overflow to UINT_MAX. There's no good result to
371 * return if 'map = 0' (e.g. returning 0 will be ambiguous
372 * with the result for 'map = 1').
373 *
374 * Assert out if map = 0. Callers will have to deal with
375 * it outside of this function.
376 */
377 g_assert(map > 0);
378
379 /* map here has at least one bit set, so no problem with clz */
380 return 31 - __builtin_clz(map);
381 }
382
/*
 * Return the canonical name for @satp_mode. The valid encodings differ
 * between RV32 and RV64, hence the @is_32_bit selector; an encoding that
 * is invalid for the given XLEN is a programming error.
 */
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
{
    /* "Bare" translation has the same encoding on both XLENs */
    if (satp_mode == VM_1_10_MBARE) {
        return "none";
    }

    if (is_32_bit) {
        if (satp_mode == VM_1_10_SV32) {
            return "sv32";
        }
    } else {
        switch (satp_mode) {
        case VM_1_10_SV39:
            return "sv39";
        case VM_1_10_SV48:
            return "sv48";
        case VM_1_10_SV57:
            return "sv57";
        case VM_1_10_SV64:
            return "sv64";
        }
    }

    g_assert_not_reached();
}
409
/*
 * Mark every satp mode up to and including @satp_mode as supported,
 * skipping encodings that are invalid for the CPU's XLEN.
 */
static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    const bool *valid_vm = (riscv_cpu_mxl(&cpu->env) == MXL_RV32) ?
                           valid_vm_1_10_32 : valid_vm_1_10_64;
    int i;

    for (i = 0; i <= satp_mode; i++) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}
422
423 /* Set the satp mode to the max supported */
set_satp_mode_default_map(RISCVCPU * cpu)424 static void set_satp_mode_default_map(RISCVCPU *cpu)
425 {
426 /*
427 * Bare CPUs do not default to the max available.
428 * Users must set a valid satp_mode in the command
429 * line.
430 */
431 if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) {
432 warn_report("No satp mode set. Defaulting to 'bare'");
433 cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE);
434 return;
435 }
436
437 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
438 }
439 #endif
440
/*
 * Instance init for the 'max' CPU model: enable MMU and PMP and use the
 * latest privileged spec. The satp ceiling depends on the build's XLEN.
 */
static void riscv_max_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_RISCV32
    set_satp_mode_max_supported(cpu, VM_1_10_SV32);
#else
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
#endif
}
458
459 #if defined(TARGET_RISCV64)
/* Instance init for the generic rv64 base CPU model. */
static void rv64_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.pmp = true;
    cpu->cfg.mmu = true;

    /* Base models always track the latest privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
}
474
/* Instance init for the SiFive U54-like rv64 CPU model. */
static void rv64_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
}
491
/* Instance init for the SiFive E-series rv64 CPU model (no MMU). */
static void rv64_sifive_e_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
}
508
/*
 * Instance init for the T-Head C906: RVGC + S/U modes on priv spec 1.11,
 * plus the vendor XThead* custom extensions and custom CSRs.
 */
static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_11_0;

    cpu->cfg.ext_zfa = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    /* Vendor-specific T-Head extensions implemented by this core */
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
    /* C906 exposes vendor CSRs beyond the standard set */
    th_register_custom_csrs(cpu);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
}
540
/*
 * Instance init for the Ventana Veyron V1: RVGCH + S/U modes on priv
 * spec 1.12 with a rich set of standard extensions plus the vendor
 * XVentanaCondOps extension.
 */
static void rv64_veyron_v1_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH);
    env->priv_ver = PRIV_VERSION_1_12_0;

    /* Enable ISA extensions */
    cpu->cfg.mmu = true;
    cpu->cfg.ext_zifencei = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicbom = true;
    cpu->cfg.cbom_blocksize = 64;   /* cache-block management block size */
    cpu->cfg.cboz_blocksize = 64;   /* cache-block zero block size */
    cpu->cfg.ext_zicboz = true;
    cpu->cfg.ext_smaia = true;
    cpu->cfg.ext_ssaia = true;
    cpu->cfg.ext_sscofpmf = true;
    cpu->cfg.ext_sstc = true;
    cpu->cfg.ext_svinval = true;
    cpu->cfg.ext_svnapot = true;
    cpu->cfg.ext_svpbmt = true;
    cpu->cfg.ext_smstateen = true;
    cpu->cfg.ext_zba = true;
    cpu->cfg.ext_zbb = true;
    cpu->cfg.ext_zbc = true;
    cpu->cfg.ext_zbs = true;
    cpu->cfg.ext_XVentanaCondOps = true;

    /* Vendor identification CSR values */
    cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
    cpu->cfg.marchid = VEYRON_V1_MARCHID;
    cpu->cfg.mimpid = VEYRON_V1_MIMPID;

#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV48);
#endif
}
580
581 #ifdef CONFIG_TCG
/* Instance init for the experimental rv128 base model (single-threaded TCG only). */
static void rv128_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    /* MTTCG cannot provide the 128-bit aligned atomics rv128 needs */
    if (qemu_tcg_mttcg_enabled()) {
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }

    cpu->cfg.pmp = true;
    cpu->cfg.mmu = true;

    /* Base models always track the latest privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV57);
#endif
}
603 #endif /* CONFIG_TCG */
604
/* Bare rv64i model: only RVI; all else must be enabled explicitly. */
static void rv64i_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(&cpu->env, RVI);
}
610
/* Bare rv64e model: only RVE; all else must be enabled explicitly. */
static void rv64e_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(&cpu->env, RVE);
}
616
617 #else /* !TARGET_RISCV64 */
618
/* Instance init for the generic rv32 base CPU model. */
static void rv32_base_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    cpu->cfg.pmp = true;
    cpu->cfg.mmu = true;

    /* Base models always track the latest privileged specification */
    env->priv_ver = PRIV_VERSION_LATEST;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV32);
#endif
}
633
/* Instance init for the SiFive U-series rv32 CPU model. */
static void rv32_sifive_u_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV32);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
}
650
/* Instance init for the SiFive E-series rv32 CPU model (no MMU). */
static void rv32_sifive_e_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
}
667
/* Instance init for the lowRISC Ibex CPU model (no MMU, Smepmp). */
static void rv32_ibex_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_12_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
    /* Ibex implements the enhanced (Smepmp) PMP */
    cpu->cfg.ext_smepmp = true;
}
684
/* Instance init for a generic rv32imafcu CPU model without an MMU. */
static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU);
    env->priv_ver = PRIV_VERSION_1_10_0;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif

    /* inherited from parent obj via riscv_cpu_init() */
    cpu->cfg.pmp = true;
    cpu->cfg.ext_zicsr = true;
    cpu->cfg.ext_zifencei = true;
}
701
/* Bare rv32i model: only RVI; all else must be enabled explicitly. */
static void rv32i_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(&cpu->env, RVI);
}
707
/* Bare rv32e model: only RVE; all else must be enabled explicitly. */
static void rv32e_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    riscv_cpu_set_misa_ext(&cpu->env, RVE);
}
713 #endif
714
riscv_cpu_class_by_name(const char * cpu_model)715 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
716 {
717 ObjectClass *oc;
718 char *typename;
719 char **cpuname;
720
721 cpuname = g_strsplit(cpu_model, ",", 1);
722 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
723 oc = object_class_by_name(typename);
724 g_strfreev(cpuname);
725 g_free(typename);
726
727 return oc;
728 }
729
/*
 * Return a newly-allocated user-facing model name derived from the CPU's
 * QOM type name. Caller frees the result.
 */
char *riscv_cpu_get_name(RISCVCPU *cpu)
{
    const char *typename =
        object_class_get_name(OBJECT_CLASS(RISCV_CPU_GET_CLASS(cpu)));

    g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX));

    return cpu_model_from_type(typename);
}
739
/*
 * Dump architectural state (pc, CSRs, GPRs, and optionally FPRs and
 * vector state depending on @flags) to @f, for 'info registers'.
 */
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i, j;
    uint8_t *p;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        /* CSRs to display, in this order */
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    /* General-purpose registers, four per output line */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        /* Floating-point registers, four per output line */
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
        /* Vector CSRs to display, in this order */
        static const int dump_rvv_csrs[] = {
            CSR_VSTART,
            CSR_VXSAT,
            CSR_VXRM,
            CSR_VCSR,
            CSR_VL,
            CSR_VTYPE,
            CSR_VLENB,
        };
        for (i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
            int csrno = dump_rvv_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
        uint16_t vlenb = cpu->cfg.vlenb;

        /*
         * Print each vector register most-significant byte first;
         * BYTE() compensates for big-endian hosts.
         */
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
            p = (uint8_t *)env->vreg;
            for (j = vlenb - 1 ; j >= 0; j--) {
                qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
            }
            qemu_fprintf(f, "\n");
        }
    }
}
865
/* CPUClass set_pc hook: store @value, sign-extending in RV32 mode. */
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}
877
riscv_cpu_get_pc(CPUState * cs)878 static vaddr riscv_cpu_get_pc(CPUState *cs)
879 {
880 RISCVCPU *cpu = RISCV_CPU(cs);
881 CPURISCVState *env = &cpu->env;
882
883 /* Match cpu_get_tb_cpu_state. */
884 if (env->xl == MXL_RV32) {
885 return env->pc & UINT32_MAX;
886 }
887 return env->pc;
888 }
889
/*
 * Report whether this vCPU has any interrupt work pending, i.e. whether
 * a WFI should wake. User-mode emulation always reports work.
 */
bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}
906
/* CPUClass mmu_index hook: delegate to the env-level helper. */
static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    CPURISCVState *env = cpu_env(cs);

    return riscv_env_mmu_index(env, ifetch);
}
911
/*
 * Resettable "hold" phase for RISC-V CPUs: bring the architectural
 * state back to its reset values.  The system-emulation part resets
 * machine-mode CSRs; the tail is shared with user-mode emulation.
 */
static void riscv_cpu_reset_hold(Object *obj, ResetType type)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    CPURISCVState *env = &cpu->env;

    /* Let the parent class run its own reset phase first. */
    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj, type);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = mcc->misa_mxl_max;
    /* Harts come out of reset in M-mode with MIE/MPRV clear. */
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    /* Start executing from the configured reset vector. */
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    /* menvcfg reset value depends on the Svpbmt/Svade/Svadu config. */
    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (!cpu->cfg.ext_svade && cpu->cfg.ext_svadu ?
                    MENVCFG_ADUE : 0);
    env->henvcfg = 0;

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    /* Copy the M-mode priorities into hviprio where not read-only-zero. */
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);

    /*
     * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
     * extension is enabled.
     */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    /*
     * Clear mseccfg and unlock all the PMP entries upon reset.
     * This is allowed as per the priv and smepmp specifications
     * and is needed to clear stale entries across reboots.
     */
    if (riscv_cpu_cfg(env)->ext_smepmp) {
        env->mseccfg = 0;
    }

    pmp_unlock_entries(env);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    /* Invalidate any outstanding LR reservation. */
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_reset_hold(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}
1011
/* Select the disassembler matching the current effective XLEN. */
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);
    RISCVMXL xl = cpu->env.xl;

    info->target_info = &cpu->cfg;

    if (xl == MXL_RV32) {
        info->print_insn = print_insn_riscv32;
    } else if (xl == MXL_RV64) {
        info->print_insn = print_insn_riscv64;
    } else if (xl == MXL_RV128) {
        info->print_insn = print_insn_riscv128;
    } else {
        g_assert_not_reached();
    }
}
1032
1033 #ifndef CONFIG_USER_ONLY
/*
 * Finalize the satp mode configuration:
 *  - derive a map from the user's explicit enables/disables (or the
 *    default) against what the CPU supports,
 *  - reject combinations that exceed the hardware capability or that
 *    the specification forbids,
 *  - expand the map so every mode below the chosen maximum is set.
 * satp_mode.map/init/supported are bitmaps indexed by VM_1_10_* values.
 */
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_is_32bit(cpu);
    uint8_t satp_mode_map_max, satp_mode_supported_max;

    /* The CPU wants the OS to decide which satp mode to use */
    if (cpu->cfg.satp_mode.supported == 0) {
        return;
    }

    satp_mode_supported_max =
                    satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        /* A mode below the maximum may not be explicitly disabled. */
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
1106 #endif
1107
riscv_cpu_finalize_features(RISCVCPU * cpu,Error ** errp)1108 void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1109 {
1110 Error *local_err = NULL;
1111
1112 #ifndef CONFIG_USER_ONLY
1113 riscv_cpu_satp_mode_finalize(cpu, &local_err);
1114 if (local_err != NULL) {
1115 error_propagate(errp, local_err);
1116 return;
1117 }
1118 #endif
1119
1120 if (tcg_enabled()) {
1121 riscv_tcg_cpu_finalize_features(cpu, &local_err);
1122 if (local_err != NULL) {
1123 error_propagate(errp, local_err);
1124 return;
1125 }
1126 riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
1127 } else if (kvm_enabled()) {
1128 riscv_kvm_cpu_finalize_features(cpu, &local_err);
1129 if (local_err != NULL) {
1130 error_propagate(errp, local_err);
1131 return;
1132 }
1133 }
1134 }
1135
/*
 * DeviceClass::realize for RISC-V CPUs.  Ordering matters here:
 * features must be finalized before gdb registers are registered,
 * and the vcpu is reset only after it has been fully initialized.
 */
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

#ifndef CONFIG_USER_ONLY
    /* Debug triggers are only realized when the 'debug' cfg is on. */
    if (cpu->cfg.debug) {
        riscv_trigger_realize(&cpu->env);
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}
1168
riscv_cpu_accelerator_compatible(RISCVCPU * cpu)1169 bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu)
1170 {
1171 if (tcg_enabled()) {
1172 return riscv_cpu_tcg_compatible(cpu);
1173 }
1174
1175 return true;
1176 }
1177
1178 #ifndef CONFIG_USER_ONLY
cpu_riscv_get_satp(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1179 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1180 void *opaque, Error **errp)
1181 {
1182 RISCVSATPMap *satp_map = opaque;
1183 uint8_t satp = satp_mode_from_str(name);
1184 bool value;
1185
1186 value = satp_map->map & (1 << satp);
1187
1188 visit_type_bool(v, name, &value, errp);
1189 }
1190
cpu_riscv_set_satp(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1191 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1192 void *opaque, Error **errp)
1193 {
1194 RISCVSATPMap *satp_map = opaque;
1195 uint8_t satp = satp_mode_from_str(name);
1196 bool value;
1197
1198 if (!visit_type_bool(v, name, &value, errp)) {
1199 return;
1200 }
1201
1202 satp_map->map = deposit32(satp_map->map, satp, 1, value);
1203 satp_map->init |= 1 << satp;
1204 }
1205
/* Expose the satp mode booleans ("sv32", "sv39", ...) as properties. */
void riscv_add_satp_mode_properties(Object *obj)
{
    static const char * const rv64_modes[] = {
        "sv39", "sv48", "sv57", "sv64",
    };
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        /* RV32 only knows a single paged mode. */
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        return;
    }

    for (size_t i = 0; i < ARRAY_SIZE(rv64_modes); i++) {
        object_property_add(obj, rv64_modes[i], "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}
1224
/*
 * GPIO input handler: route an incoming interrupt line either to the
 * local interrupt state (mip/seip) or, for lines above IRQ_LOCAL_MAX,
 * to the hypervisor guest external interrupt state (hgeip).
 */
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                /*
                 * SEIP can also be set by software; keep the hardware
                 * level separate and OR both into mip.
                 */
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
1286 #endif /* CONFIG_USER_ONLY */
1287
riscv_cpu_is_dynamic(Object * cpu_obj)1288 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
1289 {
1290 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
1291 }
1292
static void riscv_cpu_post_init(Object *obj)
{
    /* Let the active accelerator run its per-CPU instance init hook. */
    accel_cpu_instance_init(CPU(obj));
}
1297
/* Instance init common to every RISC-V CPU model. */
static void riscv_cpu_init(Object *obj)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;

    env->misa_mxl = mcc->misa_mxl_max;

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */

    general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);

    /*
     * QEMU supported the timer and performance counters before they
     * became discrete ISA extensions, so keep them enabled by default
     * on every CPU for compatibility.  Each accelerator decides what
     * to do when users disable them.
     */
    cpu->cfg.ext_zicntr = true;
    cpu->cfg.ext_zihpm = true;

    /* Defaults for the non-boolean cpu properties. */
    cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16);
    cpu->cfg.vlenb = 128 >> 3;
    cpu->cfg.elen = 64;
    cpu->cfg.cbom_blocksize = 64;
    cpu->cfg.cbop_blocksize = 64;
    cpu->cfg.cboz_blocksize = 64;
    env->vext_ver = VEXT_VERSION_1_00_0;
}
1332
/* Extra instance init for the "bare" CPU models. */
static void riscv_bare_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    /*
     * Unlike the generic models (see riscv_cpu_init()), bare CPUs do
     * not carry the legacy always-on defaults for the timer and the
     * performance counters: users must enable them explicitly.
     */
    cpu->cfg.ext_zicntr = false;
    cpu->cfg.ext_zihpm = false;

    /* Oldest priv spec version QEMU supports. */
    cpu->env.priv_ver = PRIV_VERSION_1_10_0;

#ifndef CONFIG_USER_ONLY
    /*
     * Advertise every available satp mode; the effective default is
     * MBARE unless the user selects one (see set_satp_mode_default()).
     */
    set_satp_mode_max_supported(cpu, VM_1_10_SV64);
#endif
}
1360
/* Property name / human-readable description for one MISA bit. */
typedef struct misa_ext_info {
    const char *name;
    const char *description;
} MISAExtInfo;

/* Array index for a MISA bit: its bit position. */
#define MISA_INFO_IDX(_bit) \
    __builtin_ctz(_bit)

#define MISA_EXT_INFO(_bit, _propname, _descr) \
    [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}

/* Indexed by MISA bit position; unlisted bits are zero-filled. */
static const MISAExtInfo misa_ext_info_arr[] = {
    MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
    MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
    MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
    MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
    MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
    MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
    MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
    MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
    MISA_EXT_INFO(RVU, "u", "User-level instructions"),
    MISA_EXT_INFO(RVH, "h", "Hypervisor"),
    MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
    MISA_EXT_INFO(RVV, "v", "Vector operations"),
    MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
    MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
};
1388
/* Pick the gdb core XML matching the class' maximum MXL. */
static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
{
    CPUClass *cc = CPU_CLASS(mcc);
    const char *xml = NULL;

#ifdef TARGET_RISCV64
    if (mcc->misa_mxl_max == MXL_RV64 || mcc->misa_mxl_max == MXL_RV128) {
        xml = "riscv-64bit-cpu.xml";
    }
#endif
    if (mcc->misa_mxl_max == MXL_RV32) {
        xml = "riscv-32bit-cpu.xml";
    }

    if (xml == NULL) {
        /* Any other MXL value is a programming error. */
        g_assert_not_reached();
    }

    cc->gdb_core_xml_file = xml;
}
1408
/* Map a MISA bit to its misa_ext_info_arr index, asserting validity. */
static int riscv_validate_misa_info_idx(uint32_t bit)
{
    /* __builtin_ctz(0) is undefined; RVA == 1 is our smallest input. */
    g_assert(bit != 0);

    int idx = MISA_INFO_IDX(bit);

    g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
    return idx;
}
1423
/* Property name for a MISA bit; asserts the bit is a known extension. */
const char *riscv_get_misa_ext_name(uint32_t bit)
{
    const char *name =
        misa_ext_info_arr[riscv_validate_misa_info_idx(bit)].name;

    g_assert(name != NULL);
    return name;
}
1432
/* Human-readable description for a MISA bit; asserts it is known. */
const char *riscv_get_misa_ext_description(uint32_t bit)
{
    const char *descr =
        misa_ext_info_arr[riscv_validate_misa_info_idx(bit)].description;

    g_assert(descr != NULL);
    return descr;
}
1441
/* Shorthand for a boolean multi-letter extension config entry. */
#define MULTI_EXT_CFG_BOOL(_name, _prop, _defval) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .enabled = _defval}

/* Standard (ratified) multi-letter extensions and their defaults. */
const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
    MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
    MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
    MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
    MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
    MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false),
    MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
    MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
    MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("zfbfmin", ext_zfbfmin, false),
    MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
    MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
    MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
    MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
    MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
    MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
    MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),

    MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
    MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
    MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
    MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
    MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
    MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
    MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
    MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false),
    MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false),

    MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true),
    MULTI_EXT_CFG_BOOL("zihpm", ext_zihpm, true),

    MULTI_EXT_CFG_BOOL("zba", ext_zba, true),
    MULTI_EXT_CFG_BOOL("zbb", ext_zbb, true),
    MULTI_EXT_CFG_BOOL("zbc", ext_zbc, true),
    MULTI_EXT_CFG_BOOL("zbkb", ext_zbkb, false),
    MULTI_EXT_CFG_BOOL("zbkc", ext_zbkc, false),
    MULTI_EXT_CFG_BOOL("zbkx", ext_zbkx, false),
    MULTI_EXT_CFG_BOOL("zbs", ext_zbs, true),
    MULTI_EXT_CFG_BOOL("zk", ext_zk, false),
    MULTI_EXT_CFG_BOOL("zkn", ext_zkn, false),
    MULTI_EXT_CFG_BOOL("zknd", ext_zknd, false),
    MULTI_EXT_CFG_BOOL("zkne", ext_zkne, false),
    MULTI_EXT_CFG_BOOL("zknh", ext_zknh, false),
    MULTI_EXT_CFG_BOOL("zkr", ext_zkr, false),
    MULTI_EXT_CFG_BOOL("zks", ext_zks, false),
    MULTI_EXT_CFG_BOOL("zksed", ext_zksed, false),
    MULTI_EXT_CFG_BOOL("zksh", ext_zksh, false),
    MULTI_EXT_CFG_BOOL("zkt", ext_zkt, false),
    MULTI_EXT_CFG_BOOL("ztso", ext_ztso, false),

    MULTI_EXT_CFG_BOOL("zdinx", ext_zdinx, false),
    MULTI_EXT_CFG_BOOL("zfinx", ext_zfinx, false),
    MULTI_EXT_CFG_BOOL("zhinx", ext_zhinx, false),
    MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false),

    MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true),
    MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true),
    MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true),

    MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false),

    MULTI_EXT_CFG_BOOL("zca", ext_zca, false),
    MULTI_EXT_CFG_BOOL("zcb", ext_zcb, false),
    MULTI_EXT_CFG_BOOL("zcd", ext_zcd, false),
    MULTI_EXT_CFG_BOOL("zce", ext_zce, false),
    MULTI_EXT_CFG_BOOL("zcf", ext_zcf, false),
    MULTI_EXT_CFG_BOOL("zcmp", ext_zcmp, false),
    MULTI_EXT_CFG_BOOL("zcmt", ext_zcmt, false),
    MULTI_EXT_CFG_BOOL("zicond", ext_zicond, false),

    /* Vector cryptography extensions */
    MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
    MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
    MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
    MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
    MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
    MULTI_EXT_CFG_BOOL("zvknhb", ext_zvknhb, false),
    MULTI_EXT_CFG_BOOL("zvksed", ext_zvksed, false),
    MULTI_EXT_CFG_BOOL("zvksh", ext_zvksh, false),
    MULTI_EXT_CFG_BOOL("zvkt", ext_zvkt, false),
    MULTI_EXT_CFG_BOOL("zvkn", ext_zvkn, false),
    MULTI_EXT_CFG_BOOL("zvknc", ext_zvknc, false),
    MULTI_EXT_CFG_BOOL("zvkng", ext_zvkng, false),
    MULTI_EXT_CFG_BOOL("zvks", ext_zvks, false),
    MULTI_EXT_CFG_BOOL("zvksc", ext_zvksc, false),
    MULTI_EXT_CFG_BOOL("zvksg", ext_zvksg, false),

    DEFINE_PROP_END_OF_LIST(),
};
1549
/* Vendor-specific custom extensions (T-Head, Ventana), off by default. */
const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[] = {
    MULTI_EXT_CFG_BOOL("xtheadba", ext_xtheadba, false),
    MULTI_EXT_CFG_BOOL("xtheadbb", ext_xtheadbb, false),
    MULTI_EXT_CFG_BOOL("xtheadbs", ext_xtheadbs, false),
    MULTI_EXT_CFG_BOOL("xtheadcmo", ext_xtheadcmo, false),
    MULTI_EXT_CFG_BOOL("xtheadcondmov", ext_xtheadcondmov, false),
    MULTI_EXT_CFG_BOOL("xtheadfmemidx", ext_xtheadfmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadfmv", ext_xtheadfmv, false),
    MULTI_EXT_CFG_BOOL("xtheadmac", ext_xtheadmac, false),
    MULTI_EXT_CFG_BOOL("xtheadmemidx", ext_xtheadmemidx, false),
    MULTI_EXT_CFG_BOOL("xtheadmempair", ext_xtheadmempair, false),
    MULTI_EXT_CFG_BOOL("xtheadsync", ext_xtheadsync, false),
    MULTI_EXT_CFG_BOOL("xventanacondops", ext_XVentanaCondOps, false),

    DEFINE_PROP_END_OF_LIST(),
};
1566
/* Experimental extensions; property names get an 'x-' prefix. */
const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
    DEFINE_PROP_END_OF_LIST(),
};
1571
/*
 * 'Named features' is the name we give to extensions that we
 * don't want to expose to users. They are either immutable
 * (always enabled/disable) or they'll vary depending on
 * the resulting CPU state. They have riscv,isa strings
 * and priv_ver like regular extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
    MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),

    DEFINE_PROP_END_OF_LIST(),
};
1584
/*
 * Deprecated entries marked for future removal: capitalized aliases
 * of extensions that are also listed (lowercase) in riscv_cpu_extensions.
 */
const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = {
    MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true),
    MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true),
    MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true),
    MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true),
    MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true),
    MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true),
    MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false),
    MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false),
    MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false),
    MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false),
    MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false),

    DEFINE_PROP_END_OF_LIST(),
};
1601
cpu_set_prop_err(RISCVCPU * cpu,const char * propname,Error ** errp)1602 static void cpu_set_prop_err(RISCVCPU *cpu, const char *propname,
1603 Error **errp)
1604 {
1605 g_autofree char *cpuname = riscv_cpu_get_name(cpu);
1606 error_setg(errp, "CPU '%s' does not allow changing the value of '%s'",
1607 cpuname, propname);
1608 }
1609
prop_pmu_num_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1610 static void prop_pmu_num_set(Object *obj, Visitor *v, const char *name,
1611 void *opaque, Error **errp)
1612 {
1613 RISCVCPU *cpu = RISCV_CPU(obj);
1614 uint8_t pmu_num, curr_pmu_num;
1615 uint32_t pmu_mask;
1616
1617 visit_type_uint8(v, name, &pmu_num, errp);
1618
1619 curr_pmu_num = ctpop32(cpu->cfg.pmu_mask);
1620
1621 if (pmu_num != curr_pmu_num && riscv_cpu_is_vendor(obj)) {
1622 cpu_set_prop_err(cpu, name, errp);
1623 error_append_hint(errp, "Current '%s' val: %u\n",
1624 name, curr_pmu_num);
1625 return;
1626 }
1627
1628 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1629 error_setg(errp, "Number of counters exceeds maximum available");
1630 return;
1631 }
1632
1633 if (pmu_num == 0) {
1634 pmu_mask = 0;
1635 } else {
1636 pmu_mask = MAKE_64BIT_MASK(3, pmu_num);
1637 }
1638
1639 warn_report("\"pmu-num\" property is deprecated; use \"pmu-mask\"");
1640 cpu->cfg.pmu_mask = pmu_mask;
1641 cpu_option_add_user_setting("pmu-mask", pmu_mask);
1642 }
1643
prop_pmu_num_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1644 static void prop_pmu_num_get(Object *obj, Visitor *v, const char *name,
1645 void *opaque, Error **errp)
1646 {
1647 RISCVCPU *cpu = RISCV_CPU(obj);
1648 uint8_t pmu_num = ctpop32(cpu->cfg.pmu_mask);
1649
1650 visit_type_uint8(v, name, &pmu_num, errp);
1651 }
1652
/* QOM property: deprecated counter-count alias of "pmu-mask". */
static const PropertyInfo prop_pmu_num = {
    .name = "pmu-num",
    .get = prop_pmu_num_get,
    .set = prop_pmu_num_set,
};
1658
prop_pmu_mask_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1659 static void prop_pmu_mask_set(Object *obj, Visitor *v, const char *name,
1660 void *opaque, Error **errp)
1661 {
1662 RISCVCPU *cpu = RISCV_CPU(obj);
1663 uint32_t value;
1664 uint8_t pmu_num;
1665
1666 visit_type_uint32(v, name, &value, errp);
1667
1668 if (value != cpu->cfg.pmu_mask && riscv_cpu_is_vendor(obj)) {
1669 cpu_set_prop_err(cpu, name, errp);
1670 error_append_hint(errp, "Current '%s' val: %x\n",
1671 name, cpu->cfg.pmu_mask);
1672 return;
1673 }
1674
1675 pmu_num = ctpop32(value);
1676
1677 if (pmu_num > (RV_MAX_MHPMCOUNTERS - 3)) {
1678 error_setg(errp, "Number of counters exceeds maximum available");
1679 return;
1680 }
1681
1682 cpu_option_add_user_setting(name, value);
1683 cpu->cfg.pmu_mask = value;
1684 }
1685
prop_pmu_mask_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1686 static void prop_pmu_mask_get(Object *obj, Visitor *v, const char *name,
1687 void *opaque, Error **errp)
1688 {
1689 uint8_t pmu_mask = RISCV_CPU(obj)->cfg.pmu_mask;
1690
1691 visit_type_uint8(v, name, &pmu_mask, errp);
1692 }
1693
/* QOM property: bitmap of implemented programmable PMU counters. */
static const PropertyInfo prop_pmu_mask = {
    .name = "pmu-mask",
    .get = prop_pmu_mask_get,
    .set = prop_pmu_mask_set,
};
1699
prop_mmu_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1700 static void prop_mmu_set(Object *obj, Visitor *v, const char *name,
1701 void *opaque, Error **errp)
1702 {
1703 RISCVCPU *cpu = RISCV_CPU(obj);
1704 bool value;
1705
1706 visit_type_bool(v, name, &value, errp);
1707
1708 if (cpu->cfg.mmu != value && riscv_cpu_is_vendor(obj)) {
1709 cpu_set_prop_err(cpu, "mmu", errp);
1710 return;
1711 }
1712
1713 cpu_option_add_user_setting(name, value);
1714 cpu->cfg.mmu = value;
1715 }
1716
prop_mmu_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1717 static void prop_mmu_get(Object *obj, Visitor *v, const char *name,
1718 void *opaque, Error **errp)
1719 {
1720 bool value = RISCV_CPU(obj)->cfg.mmu;
1721
1722 visit_type_bool(v, name, &value, errp);
1723 }
1724
/* QOM property: whether the CPU implements an MMU. */
static const PropertyInfo prop_mmu = {
    .name = "mmu",
    .get = prop_mmu_get,
    .set = prop_mmu_set,
};
1730
prop_pmp_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1731 static void prop_pmp_set(Object *obj, Visitor *v, const char *name,
1732 void *opaque, Error **errp)
1733 {
1734 RISCVCPU *cpu = RISCV_CPU(obj);
1735 bool value;
1736
1737 visit_type_bool(v, name, &value, errp);
1738
1739 if (cpu->cfg.pmp != value && riscv_cpu_is_vendor(obj)) {
1740 cpu_set_prop_err(cpu, name, errp);
1741 return;
1742 }
1743
1744 cpu_option_add_user_setting(name, value);
1745 cpu->cfg.pmp = value;
1746 }
1747
prop_pmp_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1748 static void prop_pmp_get(Object *obj, Visitor *v, const char *name,
1749 void *opaque, Error **errp)
1750 {
1751 bool value = RISCV_CPU(obj)->cfg.pmp;
1752
1753 visit_type_bool(v, name, &value, errp);
1754 }
1755
/* QOM property: whether the CPU implements PMP. */
static const PropertyInfo prop_pmp = {
    .name = "pmp",
    .get = prop_pmp_get,
    .set = prop_pmp_set,
};
1761
priv_spec_from_str(const char * priv_spec_str)1762 static int priv_spec_from_str(const char *priv_spec_str)
1763 {
1764 int priv_version = -1;
1765
1766 if (!g_strcmp0(priv_spec_str, PRIV_VER_1_13_0_STR)) {
1767 priv_version = PRIV_VERSION_1_13_0;
1768 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_12_0_STR)) {
1769 priv_version = PRIV_VERSION_1_12_0;
1770 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_11_0_STR)) {
1771 priv_version = PRIV_VERSION_1_11_0;
1772 } else if (!g_strcmp0(priv_spec_str, PRIV_VER_1_10_0_STR)) {
1773 priv_version = PRIV_VERSION_1_10_0;
1774 }
1775
1776 return priv_version;
1777 }
1778
priv_spec_to_str(int priv_version)1779 const char *priv_spec_to_str(int priv_version)
1780 {
1781 switch (priv_version) {
1782 case PRIV_VERSION_1_10_0:
1783 return PRIV_VER_1_10_0_STR;
1784 case PRIV_VERSION_1_11_0:
1785 return PRIV_VER_1_11_0_STR;
1786 case PRIV_VERSION_1_12_0:
1787 return PRIV_VER_1_12_0_STR;
1788 case PRIV_VERSION_1_13_0:
1789 return PRIV_VER_1_13_0_STR;
1790 default:
1791 return NULL;
1792 }
1793 }
1794
prop_priv_spec_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1795 static void prop_priv_spec_set(Object *obj, Visitor *v, const char *name,
1796 void *opaque, Error **errp)
1797 {
1798 RISCVCPU *cpu = RISCV_CPU(obj);
1799 g_autofree char *value = NULL;
1800 int priv_version = -1;
1801
1802 visit_type_str(v, name, &value, errp);
1803
1804 priv_version = priv_spec_from_str(value);
1805 if (priv_version < 0) {
1806 error_setg(errp, "Unsupported privilege spec version '%s'", value);
1807 return;
1808 }
1809
1810 if (priv_version != cpu->env.priv_ver && riscv_cpu_is_vendor(obj)) {
1811 cpu_set_prop_err(cpu, name, errp);
1812 error_append_hint(errp, "Current '%s' val: %s\n", name,
1813 object_property_get_str(obj, name, NULL));
1814 return;
1815 }
1816
1817 cpu_option_add_user_setting(name, priv_version);
1818 cpu->env.priv_ver = priv_version;
1819 }
1820
prop_priv_spec_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1821 static void prop_priv_spec_get(Object *obj, Visitor *v, const char *name,
1822 void *opaque, Error **errp)
1823 {
1824 RISCVCPU *cpu = RISCV_CPU(obj);
1825 const char *value = priv_spec_to_str(cpu->env.priv_ver);
1826
1827 visit_type_str(v, name, (char **)&value, errp);
1828 }
1829
/* QOM property: privileged spec version as a string. */
static const PropertyInfo prop_priv_spec = {
    .name = "priv_spec",
    .get = prop_priv_spec_get,
    .set = prop_priv_spec_set,
};
1835
prop_vext_spec_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1836 static void prop_vext_spec_set(Object *obj, Visitor *v, const char *name,
1837 void *opaque, Error **errp)
1838 {
1839 RISCVCPU *cpu = RISCV_CPU(obj);
1840 g_autofree char *value = NULL;
1841
1842 visit_type_str(v, name, &value, errp);
1843
1844 if (g_strcmp0(value, VEXT_VER_1_00_0_STR) != 0) {
1845 error_setg(errp, "Unsupported vector spec version '%s'", value);
1846 return;
1847 }
1848
1849 cpu_option_add_user_setting(name, VEXT_VERSION_1_00_0);
1850 cpu->env.vext_ver = VEXT_VERSION_1_00_0;
1851 }
1852
prop_vext_spec_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1853 static void prop_vext_spec_get(Object *obj, Visitor *v, const char *name,
1854 void *opaque, Error **errp)
1855 {
1856 const char *value = VEXT_VER_1_00_0_STR;
1857
1858 visit_type_str(v, name, (char **)&value, errp);
1859 }
1860
/* QOM property: vector extension spec version as a string. */
static const PropertyInfo prop_vext_spec = {
    .name = "vext_spec",
    .get = prop_vext_spec_get,
    .set = prop_vext_spec_set,
};
1866
prop_vlen_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1867 static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
1868 void *opaque, Error **errp)
1869 {
1870 RISCVCPU *cpu = RISCV_CPU(obj);
1871 uint16_t value;
1872
1873 if (!visit_type_uint16(v, name, &value, errp)) {
1874 return;
1875 }
1876
1877 if (!is_power_of_2(value)) {
1878 error_setg(errp, "Vector extension VLEN must be power of 2");
1879 return;
1880 }
1881
1882 if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) {
1883 cpu_set_prop_err(cpu, name, errp);
1884 error_append_hint(errp, "Current '%s' val: %u\n",
1885 name, cpu->cfg.vlenb << 3);
1886 return;
1887 }
1888
1889 cpu_option_add_user_setting(name, value);
1890 cpu->cfg.vlenb = value >> 3;
1891 }
1892
prop_vlen_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1893 static void prop_vlen_get(Object *obj, Visitor *v, const char *name,
1894 void *opaque, Error **errp)
1895 {
1896 uint16_t value = RISCV_CPU(obj)->cfg.vlenb << 3;
1897
1898 visit_type_uint16(v, name, &value, errp);
1899 }
1900
/* QOM property info for the "vlen" (vector register length) option. */
static const PropertyInfo prop_vlen = {
    .name = "vlen",
    .get = prop_vlen_get,
    .set = prop_vlen_set,
};
1906
prop_elen_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1907 static void prop_elen_set(Object *obj, Visitor *v, const char *name,
1908 void *opaque, Error **errp)
1909 {
1910 RISCVCPU *cpu = RISCV_CPU(obj);
1911 uint16_t value;
1912
1913 if (!visit_type_uint16(v, name, &value, errp)) {
1914 return;
1915 }
1916
1917 if (!is_power_of_2(value)) {
1918 error_setg(errp, "Vector extension ELEN must be power of 2");
1919 return;
1920 }
1921
1922 if (value != cpu->cfg.elen && riscv_cpu_is_vendor(obj)) {
1923 cpu_set_prop_err(cpu, name, errp);
1924 error_append_hint(errp, "Current '%s' val: %u\n",
1925 name, cpu->cfg.elen);
1926 return;
1927 }
1928
1929 cpu_option_add_user_setting(name, value);
1930 cpu->cfg.elen = value;
1931 }
1932
prop_elen_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1933 static void prop_elen_get(Object *obj, Visitor *v, const char *name,
1934 void *opaque, Error **errp)
1935 {
1936 uint16_t value = RISCV_CPU(obj)->cfg.elen;
1937
1938 visit_type_uint16(v, name, &value, errp);
1939 }
1940
/* QOM property info for the "elen" (max vector element length) option. */
static const PropertyInfo prop_elen = {
    .name = "elen",
    .get = prop_elen_get,
    .set = prop_elen_set,
};
1946
prop_cbom_blksize_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1947 static void prop_cbom_blksize_set(Object *obj, Visitor *v, const char *name,
1948 void *opaque, Error **errp)
1949 {
1950 RISCVCPU *cpu = RISCV_CPU(obj);
1951 uint16_t value;
1952
1953 if (!visit_type_uint16(v, name, &value, errp)) {
1954 return;
1955 }
1956
1957 if (value != cpu->cfg.cbom_blocksize && riscv_cpu_is_vendor(obj)) {
1958 cpu_set_prop_err(cpu, name, errp);
1959 error_append_hint(errp, "Current '%s' val: %u\n",
1960 name, cpu->cfg.cbom_blocksize);
1961 return;
1962 }
1963
1964 cpu_option_add_user_setting(name, value);
1965 cpu->cfg.cbom_blocksize = value;
1966 }
1967
prop_cbom_blksize_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1968 static void prop_cbom_blksize_get(Object *obj, Visitor *v, const char *name,
1969 void *opaque, Error **errp)
1970 {
1971 uint16_t value = RISCV_CPU(obj)->cfg.cbom_blocksize;
1972
1973 visit_type_uint16(v, name, &value, errp);
1974 }
1975
/* QOM property info for the Zicbom "cbom_blocksize" option. */
static const PropertyInfo prop_cbom_blksize = {
    .name = "cbom_blocksize",
    .get = prop_cbom_blksize_get,
    .set = prop_cbom_blksize_set,
};
1981
prop_cbop_blksize_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)1982 static void prop_cbop_blksize_set(Object *obj, Visitor *v, const char *name,
1983 void *opaque, Error **errp)
1984 {
1985 RISCVCPU *cpu = RISCV_CPU(obj);
1986 uint16_t value;
1987
1988 if (!visit_type_uint16(v, name, &value, errp)) {
1989 return;
1990 }
1991
1992 if (value != cpu->cfg.cbop_blocksize && riscv_cpu_is_vendor(obj)) {
1993 cpu_set_prop_err(cpu, name, errp);
1994 error_append_hint(errp, "Current '%s' val: %u\n",
1995 name, cpu->cfg.cbop_blocksize);
1996 return;
1997 }
1998
1999 cpu_option_add_user_setting(name, value);
2000 cpu->cfg.cbop_blocksize = value;
2001 }
2002
prop_cbop_blksize_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)2003 static void prop_cbop_blksize_get(Object *obj, Visitor *v, const char *name,
2004 void *opaque, Error **errp)
2005 {
2006 uint16_t value = RISCV_CPU(obj)->cfg.cbop_blocksize;
2007
2008 visit_type_uint16(v, name, &value, errp);
2009 }
2010
/* QOM property info for the Zicbop "cbop_blocksize" option. */
static const PropertyInfo prop_cbop_blksize = {
    .name = "cbop_blocksize",
    .get = prop_cbop_blksize_get,
    .set = prop_cbop_blksize_set,
};
2016
prop_cboz_blksize_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)2017 static void prop_cboz_blksize_set(Object *obj, Visitor *v, const char *name,
2018 void *opaque, Error **errp)
2019 {
2020 RISCVCPU *cpu = RISCV_CPU(obj);
2021 uint16_t value;
2022
2023 if (!visit_type_uint16(v, name, &value, errp)) {
2024 return;
2025 }
2026
2027 if (value != cpu->cfg.cboz_blocksize && riscv_cpu_is_vendor(obj)) {
2028 cpu_set_prop_err(cpu, name, errp);
2029 error_append_hint(errp, "Current '%s' val: %u\n",
2030 name, cpu->cfg.cboz_blocksize);
2031 return;
2032 }
2033
2034 cpu_option_add_user_setting(name, value);
2035 cpu->cfg.cboz_blocksize = value;
2036 }
2037
prop_cboz_blksize_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)2038 static void prop_cboz_blksize_get(Object *obj, Visitor *v, const char *name,
2039 void *opaque, Error **errp)
2040 {
2041 uint16_t value = RISCV_CPU(obj)->cfg.cboz_blocksize;
2042
2043 visit_type_uint16(v, name, &value, errp);
2044 }
2045
/* QOM property info for the Zicboz "cboz_blocksize" option. */
static const PropertyInfo prop_cboz_blksize = {
    .name = "cboz_blocksize",
    .get = prop_cboz_blksize_get,
    .set = prop_cboz_blksize_set,
};
2051
prop_mvendorid_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)2052 static void prop_mvendorid_set(Object *obj, Visitor *v, const char *name,
2053 void *opaque, Error **errp)
2054 {
2055 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2056 RISCVCPU *cpu = RISCV_CPU(obj);
2057 uint32_t prev_val = cpu->cfg.mvendorid;
2058 uint32_t value;
2059
2060 if (!visit_type_uint32(v, name, &value, errp)) {
2061 return;
2062 }
2063
2064 if (!dynamic_cpu && prev_val != value) {
2065 error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2066 object_get_typename(obj), prev_val);
2067 return;
2068 }
2069
2070 cpu->cfg.mvendorid = value;
2071 }
2072
prop_mvendorid_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)2073 static void prop_mvendorid_get(Object *obj, Visitor *v, const char *name,
2074 void *opaque, Error **errp)
2075 {
2076 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2077
2078 visit_type_uint32(v, name, &value, errp);
2079 }
2080
/* QOM property info for the "mvendorid" CSR value. */
static const PropertyInfo prop_mvendorid = {
    .name = "mvendorid",
    .get = prop_mvendorid_get,
    .set = prop_mvendorid_set,
};
2086
prop_mimpid_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)2087 static void prop_mimpid_set(Object *obj, Visitor *v, const char *name,
2088 void *opaque, Error **errp)
2089 {
2090 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2091 RISCVCPU *cpu = RISCV_CPU(obj);
2092 uint64_t prev_val = cpu->cfg.mimpid;
2093 uint64_t value;
2094
2095 if (!visit_type_uint64(v, name, &value, errp)) {
2096 return;
2097 }
2098
2099 if (!dynamic_cpu && prev_val != value) {
2100 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
2101 object_get_typename(obj), prev_val);
2102 return;
2103 }
2104
2105 cpu->cfg.mimpid = value;
2106 }
2107
prop_mimpid_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)2108 static void prop_mimpid_get(Object *obj, Visitor *v, const char *name,
2109 void *opaque, Error **errp)
2110 {
2111 uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2112
2113 visit_type_uint64(v, name, &value, errp);
2114 }
2115
/* QOM property info for the "mimpid" CSR value. */
static const PropertyInfo prop_mimpid = {
    .name = "mimpid",
    .get = prop_mimpid_get,
    .set = prop_mimpid_set,
};
2121
prop_marchid_set(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)2122 static void prop_marchid_set(Object *obj, Visitor *v, const char *name,
2123 void *opaque, Error **errp)
2124 {
2125 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2126 RISCVCPU *cpu = RISCV_CPU(obj);
2127 uint64_t prev_val = cpu->cfg.marchid;
2128 uint64_t value, invalid_val;
2129 uint32_t mxlen = 0;
2130
2131 if (!visit_type_uint64(v, name, &value, errp)) {
2132 return;
2133 }
2134
2135 if (!dynamic_cpu && prev_val != value) {
2136 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
2137 object_get_typename(obj), prev_val);
2138 return;
2139 }
2140
2141 switch (riscv_cpu_mxl(&cpu->env)) {
2142 case MXL_RV32:
2143 mxlen = 32;
2144 break;
2145 case MXL_RV64:
2146 case MXL_RV128:
2147 mxlen = 64;
2148 break;
2149 default:
2150 g_assert_not_reached();
2151 }
2152
2153 invalid_val = 1LL << (mxlen - 1);
2154
2155 if (value == invalid_val) {
2156 error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2157 "and the remaining bits zero", mxlen);
2158 return;
2159 }
2160
2161 cpu->cfg.marchid = value;
2162 }
2163
prop_marchid_get(Object * obj,Visitor * v,const char * name,void * opaque,Error ** errp)2164 static void prop_marchid_get(Object *obj, Visitor *v, const char *name,
2165 void *opaque, Error **errp)
2166 {
2167 uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2168
2169 visit_type_uint64(v, name, &value, errp);
2170 }
2171
/* QOM property info for the "marchid" CSR value. */
static const PropertyInfo prop_marchid = {
    .name = "marchid",
    .get = prop_marchid_get,
    .set = prop_marchid_set,
};
2177
2178 /*
2179 * RVA22U64 defines some 'named features' that are cache
2180 * related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa
 * and Zicclsm. They are always implemented in TCG and
 * don't need to be manually enabled by the profile.
2183 */
static RISCVCPUProfile RVA22U64 = {
    .parent = NULL,
    .name = "rva22u64",
    .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
    .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
    .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
    .ext_offsets = {
        CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause),
        CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb),
        CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin),
        CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr),
        CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom),
        CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz),

        /* mandatory named features for this profile */
        CPU_CFG_OFFSET(ext_zic64b),

        RISCV_PROFILE_EXT_LIST_END
    }
};
2204
2205 /*
2206 * As with RVA22U64, RVA22S64 also defines 'named features'.
2207 *
2208 * Cache related features that we consider enabled since we don't
2209 * implement cache: Ssccptr
2210 *
2211 * Other named features that we already implement: Sstvecd, Sstvala,
2212 * Sscounterenw
2213 *
 * The remaining features/extensions come from RVA22U64.
2215 */
static RISCVCPUProfile RVA22S64 = {
    .parent = &RVA22U64,
    .name = "rva22s64",
    .misa_ext = RVS,
    .priv_spec = PRIV_VERSION_1_12_0,
    .satp_mode = VM_1_10_SV39,
    .ext_offsets = {
        /* rva22s64 exts */
        CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt),
        CPU_CFG_OFFSET(ext_svinval), CPU_CFG_OFFSET(ext_svade),

        RISCV_PROFILE_EXT_LIST_END
    }
};
2230
/* NULL-terminated list of every profile known to the RISC-V CPU code. */
RISCVCPUProfile *riscv_profiles[] = {
    &RVA22U64,
    &RVA22S64,
    NULL,
};
2236
/*
 * Implied-extension rules for MISA bits: enabling the MISA extension
 * in .ext also enables .implied_misa_exts and every multi-letter
 * extension listed in .implied_multi_exts.
 */
static RISCVCPUImpliedExtsRule RVA_IMPLIED = {
    .is_misa = true,
    .ext = RVA,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zalrsc), CPU_CFG_OFFSET(ext_zaamo),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVD_IMPLIED = {
    .is_misa = true,
    .ext = RVD,
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule RVF_IMPLIED = {
    .is_misa = true,
    .ext = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVM_IMPLIED = {
    .is_misa = true,
    .ext = RVM,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zmmul),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule RVV_IMPLIED = {
    .is_misa = true,
    .ext = RVV,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64d),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};
2283
/*
 * Implied-extension rules for multi-letter extensions: enabling the
 * extension at offset .ext also enables .implied_misa_exts and every
 * extension listed in .implied_multi_exts.
 */
static RISCVCPUImpliedExtsRule ZCB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCD_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcd),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCE_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zce),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zcb), CPU_CFG_OFFSET(ext_zcmp),
        CPU_CFG_OFFSET(ext_zcmt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCF_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcf),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMP_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmp),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZCMT_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zcmt),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zca), CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZDINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zdinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZFA_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfa),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFBFMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfbfmin),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFH_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfh),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfhmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZFHMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfhmin),
    .implied_misa_exts = RVF,
    .implied_multi_exts = { RISCV_IMPLIED_EXTS_RULE_END },
};

static RISCVCPUImpliedExtsRule ZFINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zfinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZHINX_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zhinx),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zhinxmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZHINXMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zhinxmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zfinx),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZICNTR_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zicntr),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZIHPM_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zihpm),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZK_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zk),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zkn), CPU_CFG_OFFSET(ext_zkr),
        CPU_CFG_OFFSET(ext_zkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZKN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zkn),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
        CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zkne),
        CPU_CFG_OFFSET(ext_zknd), CPU_CFG_OFFSET(ext_zknh),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZKS_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zks),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zbkb), CPU_CFG_OFFSET(ext_zbkc),
        CPU_CFG_OFFSET(ext_zbkx), CPU_CFG_OFFSET(ext_zksed),
        CPU_CFG_OFFSET(ext_zksh),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVBB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvbb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkb),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE32F_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve32f),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE32X_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve32x),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zicsr),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64D_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64d),
    .implied_misa_exts = RVD,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64F_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64f),
    .implied_misa_exts = RVF,
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f), CPU_CFG_OFFSET(ext_zve64x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVE64X_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zve64x),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFBFMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfbfmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFBFWMA_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfbfwma),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvfbfmin), CPU_CFG_OFFSET(ext_zfbfmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFH_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfh),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zfhmin),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVFHMIN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvfhmin),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve32f),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKN_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvkn),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkned), CPU_CFG_OFFSET(ext_zvknhb),
        CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNC_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvknc),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvbc),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvkng),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvkn), CPU_CFG_OFFSET(ext_zvkg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKNHB_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvknhb),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zve64x),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKS_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvks),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvksed), CPU_CFG_OFFSET(ext_zvksh),
        CPU_CFG_OFFSET(ext_zvkb), CPU_CFG_OFFSET(ext_zvkt),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKSC_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvksc),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvbc),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
    .ext = CPU_CFG_OFFSET(ext_zvksg),
    .implied_multi_exts = {
        CPU_CFG_OFFSET(ext_zvks), CPU_CFG_OFFSET(ext_zvkg),

        RISCV_IMPLIED_EXTS_RULE_END
    },
};

/* NULL-terminated list of all MISA-bit implication rules above. */
RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
    &RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
    &RVM_IMPLIED, &RVV_IMPLIED, NULL
};

/* NULL-terminated list of all multi-letter implication rules above. */
RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
    &ZCB_IMPLIED, &ZCD_IMPLIED, &ZCE_IMPLIED,
    &ZCF_IMPLIED, &ZCMP_IMPLIED, &ZCMT_IMPLIED,
    &ZDINX_IMPLIED, &ZFA_IMPLIED, &ZFBFMIN_IMPLIED,
    &ZFH_IMPLIED, &ZFHMIN_IMPLIED, &ZFINX_IMPLIED,
    &ZHINX_IMPLIED, &ZHINXMIN_IMPLIED, &ZICNTR_IMPLIED,
    &ZIHPM_IMPLIED, &ZK_IMPLIED, &ZKN_IMPLIED,
    &ZKS_IMPLIED, &ZVBB_IMPLIED, &ZVE32F_IMPLIED,
    &ZVE32X_IMPLIED, &ZVE64D_IMPLIED, &ZVE64F_IMPLIED,
    &ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
    &ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
    &ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
    &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
    NULL
};
2632
2633 static Property riscv_cpu_properties[] = {
2634 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
2635
2636 {.name = "pmu-mask", .info = &prop_pmu_mask},
2637 {.name = "pmu-num", .info = &prop_pmu_num}, /* Deprecated */
2638
2639 {.name = "mmu", .info = &prop_mmu},
2640 {.name = "pmp", .info = &prop_pmp},
2641
2642 {.name = "priv_spec", .info = &prop_priv_spec},
2643 {.name = "vext_spec", .info = &prop_vext_spec},
2644
2645 {.name = "vlen", .info = &prop_vlen},
2646 {.name = "elen", .info = &prop_elen},
2647
2648 {.name = "cbom_blocksize", .info = &prop_cbom_blksize},
2649 {.name = "cbop_blocksize", .info = &prop_cbop_blksize},
2650 {.name = "cboz_blocksize", .info = &prop_cboz_blksize},
2651
2652 {.name = "mvendorid", .info = &prop_mvendorid},
2653 {.name = "mimpid", .info = &prop_mimpid},
2654 {.name = "marchid", .info = &prop_marchid},
2655
2656 #ifndef CONFIG_USER_ONLY
2657 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
2658 #endif
2659
2660 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
2661
2662 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
2663 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
2664
2665 /*
2666 * write_misa() is marked as experimental for now so mark
2667 * it with -x and default to 'false'.
2668 */
2669 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
2670 DEFINE_PROP_END_OF_LIST(),
2671 };
2672
2673 #if defined(TARGET_RISCV64)
rva22u64_profile_cpu_init(Object * obj)2674 static void rva22u64_profile_cpu_init(Object *obj)
2675 {
2676 rv64i_bare_cpu_init(obj);
2677
2678 RVA22U64.enabled = true;
2679 }
2680
rva22s64_profile_cpu_init(Object * obj)2681 static void rva22s64_profile_cpu_init(Object *obj)
2682 {
2683 rv64i_bare_cpu_init(obj);
2684
2685 RVA22S64.enabled = true;
2686 }
2687 #endif
2688
riscv_gdb_arch_name(CPUState * cs)2689 static const gchar *riscv_gdb_arch_name(CPUState *cs)
2690 {
2691 RISCVCPU *cpu = RISCV_CPU(cs);
2692 CPURISCVState *env = &cpu->env;
2693
2694 switch (riscv_cpu_mxl(env)) {
2695 case MXL_RV32:
2696 return "riscv:rv32";
2697 case MXL_RV64:
2698 case MXL_RV128:
2699 return "riscv:rv64";
2700 default:
2701 g_assert_not_reached();
2702 }
2703 }
2704
2705 #ifndef CONFIG_USER_ONLY
riscv_get_arch_id(CPUState * cs)2706 static int64_t riscv_get_arch_id(CPUState *cs)
2707 {
2708 RISCVCPU *cpu = RISCV_CPU(cs);
2709
2710 return cpu->env.mhartid;
2711 }
2712
2713 #include "hw/core/sysemu-cpu-ops.h"
2714
/* System-emulation hooks (debug page translation, ELF notes, migration). */
static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
2721 #endif
2722
riscv_cpu_common_class_init(ObjectClass * c,void * data)2723 static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
2724 {
2725 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2726 CPUClass *cc = CPU_CLASS(c);
2727 DeviceClass *dc = DEVICE_CLASS(c);
2728 ResettableClass *rc = RESETTABLE_CLASS(c);
2729
2730 device_class_set_parent_realize(dc, riscv_cpu_realize,
2731 &mcc->parent_realize);
2732
2733 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2734 &mcc->parent_phases);
2735
2736 cc->class_by_name = riscv_cpu_class_by_name;
2737 cc->has_work = riscv_cpu_has_work;
2738 cc->mmu_index = riscv_cpu_mmu_index;
2739 cc->dump_state = riscv_cpu_dump_state;
2740 cc->set_pc = riscv_cpu_set_pc;
2741 cc->get_pc = riscv_cpu_get_pc;
2742 cc->gdb_read_register = riscv_cpu_gdb_read_register;
2743 cc->gdb_write_register = riscv_cpu_gdb_write_register;
2744 cc->gdb_stop_before_watchpoint = true;
2745 cc->disas_set_info = riscv_cpu_disas_set_info;
2746 #ifndef CONFIG_USER_ONLY
2747 cc->sysemu_ops = &riscv_sysemu_ops;
2748 cc->get_arch_id = riscv_get_arch_id;
2749 #endif
2750 cc->gdb_arch_name = riscv_gdb_arch_name;
2751
2752 device_class_set_props(dc, riscv_cpu_properties);
2753 }
2754
riscv_cpu_class_init(ObjectClass * c,void * data)2755 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2756 {
2757 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2758
2759 mcc->misa_mxl_max = (uint32_t)(uintptr_t)data;
2760 riscv_cpu_validate_misa_mxl(mcc);
2761 }
2762
riscv_isa_string_ext(RISCVCPU * cpu,char ** isa_str,int max_str_len)2763 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2764 int max_str_len)
2765 {
2766 const RISCVIsaExtData *edata;
2767 char *old = *isa_str;
2768 char *new = *isa_str;
2769
2770 for (edata = isa_edata_arr; edata && edata->name; edata++) {
2771 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2772 new = g_strconcat(old, "_", edata->name, NULL);
2773 g_free(old);
2774 old = new;
2775 }
2776 }
2777
2778 *isa_str = new;
2779 }
2780
riscv_isa_string(RISCVCPU * cpu)2781 char *riscv_isa_string(RISCVCPU *cpu)
2782 {
2783 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2784 int i;
2785 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2786 char *isa_str = g_new(char, maxlen);
2787 int xlen = riscv_cpu_max_xlen(mcc);
2788 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", xlen);
2789
2790 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2791 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2792 *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2793 }
2794 }
2795 *p = '\0';
2796 if (!cpu->cfg.short_isa_string) {
2797 riscv_isa_string_ext(cpu, &isa_str, maxlen);
2798 }
2799 return isa_str;
2800 }
2801
2802 #ifndef CONFIG_USER_ONLY
riscv_isa_extensions_list(RISCVCPU * cpu,int * count)2803 static char **riscv_isa_extensions_list(RISCVCPU *cpu, int *count)
2804 {
2805 int maxlen = ARRAY_SIZE(riscv_single_letter_exts) + ARRAY_SIZE(isa_edata_arr);
2806 char **extensions = g_new(char *, maxlen);
2807
2808 for (int i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2809 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2810 extensions[*count] = g_new(char, 2);
2811 snprintf(extensions[*count], 2, "%c",
2812 qemu_tolower(riscv_single_letter_exts[i]));
2813 (*count)++;
2814 }
2815 }
2816
2817 for (const RISCVIsaExtData *edata = isa_edata_arr; edata->name; edata++) {
2818 if (isa_ext_is_enabled(cpu, edata->ext_enable_offset)) {
2819 extensions[*count] = g_strdup(edata->name);
2820 (*count)++;
2821 }
2822 }
2823
2824 return extensions;
2825 }
2826
riscv_isa_write_fdt(RISCVCPU * cpu,void * fdt,char * nodename)2827 void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename)
2828 {
2829 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
2830 const size_t maxlen = sizeof("rv128i");
2831 g_autofree char *isa_base = g_new(char, maxlen);
2832 g_autofree char *riscv_isa;
2833 char **isa_extensions;
2834 int count = 0;
2835 int xlen = riscv_cpu_max_xlen(mcc);
2836
2837 riscv_isa = riscv_isa_string(cpu);
2838 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", riscv_isa);
2839
2840 snprintf(isa_base, maxlen, "rv%di", xlen);
2841 qemu_fdt_setprop_string(fdt, nodename, "riscv,isa-base", isa_base);
2842
2843 isa_extensions = riscv_isa_extensions_list(cpu, &count);
2844 qemu_fdt_setprop_string_array(fdt, nodename, "riscv,isa-extensions",
2845 isa_extensions, count);
2846
2847 for (int i = 0; i < count; i++) {
2848 g_free(isa_extensions[i]);
2849 }
2850
2851 g_free(isa_extensions);
2852 }
2853 #endif
2854
/*
 * TypeInfo helpers: each macro builds a TypeInfo entry with the given
 * instance init function and the maximum MXL smuggled via class_data
 * (decoded in riscv_cpu_class_init()). They differ only in the parent
 * QOM type, which controls dynamic/vendor/bare behavior elsewhere.
 */
#define DEFINE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_DYNAMIC_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_VENDOR_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }

#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \
    { \
        .name = (type_name), \
        .parent = TYPE_RISCV_BARE_CPU, \
        .instance_init = (initfn), \
        .class_init = riscv_cpu_class_init, \
        .class_data = (void *)(misa_mxl_max) \
    }
2899
2900 static const TypeInfo riscv_cpu_type_infos[] = {
2901 {
2902 .name = TYPE_RISCV_CPU,
2903 .parent = TYPE_CPU,
2904 .instance_size = sizeof(RISCVCPU),
2905 .instance_align = __alignof(RISCVCPU),
2906 .instance_init = riscv_cpu_init,
2907 .instance_post_init = riscv_cpu_post_init,
2908 .abstract = true,
2909 .class_size = sizeof(RISCVCPUClass),
2910 .class_init = riscv_cpu_common_class_init,
2911 },
2912 {
2913 .name = TYPE_RISCV_DYNAMIC_CPU,
2914 .parent = TYPE_RISCV_CPU,
2915 .abstract = true,
2916 },
2917 {
2918 .name = TYPE_RISCV_VENDOR_CPU,
2919 .parent = TYPE_RISCV_CPU,
2920 .abstract = true,
2921 },
2922 {
2923 .name = TYPE_RISCV_BARE_CPU,
2924 .parent = TYPE_RISCV_CPU,
2925 .instance_init = riscv_bare_cpu_init,
2926 .abstract = true,
2927 },
2928 #if defined(TARGET_RISCV32)
2929 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init),
2930 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init),
2931 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init),
2932 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init),
2933 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init),
2934 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init),
2935 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init),
2936 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init),
2937 #elif defined(TARGET_RISCV64)
2938 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init),
2939 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init),
2940 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init),
2941 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init),
2942 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init),
2943 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init),
2944 DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init),
2945 #ifdef CONFIG_TCG
2946 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init),
2947 #endif /* CONFIG_TCG */
2948 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init),
2949 DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
2950 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
2951 DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
2952 #endif /* TARGET_RISCV64 */
2953 };
2954
2955 DEFINE_TYPES(riscv_cpu_type_infos)
2956