1 /* 2 * QEMU RISC-V CPU 3 * 4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu 5 * Copyright (c) 2017-2018 SiFive, Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2 or later, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * 16 * You should have received a copy of the GNU General Public License along with 17 * this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/qemu-print.h" 22 #include "qemu/ctype.h" 23 #include "qemu/log.h" 24 #include "cpu.h" 25 #include "cpu_vendorid.h" 26 #include "pmu.h" 27 #include "internals.h" 28 #include "time_helper.h" 29 #include "exec/exec-all.h" 30 #include "qapi/error.h" 31 #include "qapi/visitor.h" 32 #include "qemu/error-report.h" 33 #include "hw/qdev-properties.h" 34 #include "migration/vmstate.h" 35 #include "fpu/softfloat-helpers.h" 36 #include "sysemu/kvm.h" 37 #include "sysemu/tcg.h" 38 #include "kvm_riscv.h" 39 #include "tcg/tcg.h" 40 41 /* RISC-V CPU definitions */ 42 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH"; 43 44 struct isa_ext_data { 45 const char *name; 46 int min_version; 47 int ext_enable_offset; 48 }; 49 50 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ 51 {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)} 52 53 /* 54 * From vector_helper.c 55 * Note that vector data is stored in host-endian 64-bit chunks, 56 * so addressing bytes needs a host-endian fixup. 57 */ 58 #if HOST_BIG_ENDIAN 59 #define BYTE(x) ((x) ^ 7) 60 #else 61 #define BYTE(x) (x) 62 #endif 63 64 /* 65 * Here are the ordering rules of extension naming defined by RISC-V 66 * specification : 67 * 1. All extensions should be separated from other multi-letter extensions 68 * by an underscore. 69 * 2. The first letter following the 'Z' conventionally indicates the most 70 * closely related alphabetical extension category, IMAFDQLCBKJTPVH. 71 * If multiple 'Z' extensions are named, they should be ordered first 72 * by category, then alphabetically within a category. 73 * 3. Standard supervisor-level extensions (starts with 'S') should be 74 * listed after standard unprivileged extensions. If multiple 75 * supervisor-level extensions are listed, they should be ordered 76 * alphabetically. 77 * 4. Non-standard extensions (starts with 'X') must be listed after all 78 * standard extensions. They must be separated from other multi-letter 79 * extensions by an underscore. 80 * 81 * Single letter extensions are checked in riscv_cpu_validate_misa_priv() 82 * instead. 83 */ 84 static const struct isa_ext_data isa_edata_arr[] = { 85 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom), 86 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz), 87 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), 88 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr), 89 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei), 90 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), 91 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), 92 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 93 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), 94 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), 95 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh), 96 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin), 97 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx), 98 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx), 99 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca), 100 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb), 101 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), 102 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), 103 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), 104 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), 105 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), 106 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), 107 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb), 108 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc), 109 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb), 110 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc), 111 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx), 112 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs), 113 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk), 114 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn), 115 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd), 116 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne), 117 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh), 118 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr), 119 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks), 120 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed), 121 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh), 122 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt), 123 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb), 124 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc), 125 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f), 126 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f), 127 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d), 128 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin), 129 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma), 130 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh), 131 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin), 132 ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg), 133 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned), 134 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha), 135 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb), 136 ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh), 137 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), 138 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), 139 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), 140 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp), 141 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), 142 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), 143 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf), 144 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), 145 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu), 146 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), 147 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), 148 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), 149 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), 150 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb), 151 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs), 152 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo), 153 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov), 154 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx), 155 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv), 156 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac), 157 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx), 158 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair), 159 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync), 160 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps), 161 }; 162 163 static bool isa_ext_is_enabled(RISCVCPU *cpu, 164 const struct isa_ext_data *edata) 165 { 166 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; 167 168 return *ext_enabled; 169 } 170 171 static void isa_ext_update_enabled(RISCVCPU *cpu, 172 const struct isa_ext_data *edata, bool en) 173 { 174 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset; 175 176 *ext_enabled = en; 177 } 178 179 const char * const riscv_int_regnames[] = { 180 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1", 181 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3", 182 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4", 183 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11", 184 "x28/t3", "x29/t4", "x30/t5", "x31/t6" 185 }; 186 187 const char * const riscv_int_regnamesh[] = { 188 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h", 189 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h", 190 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h", 191 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h", 192 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h", 193 "x30h/t5h", "x31h/t6h" 194 }; 195 196 const char * const riscv_fpr_regnames[] = { 197 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5", 198 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1", 199 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7", 200 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7", 201 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9", 202 "f30/ft10", "f31/ft11" 203 }; 204 205 const char * const riscv_rvv_regnames[] = { 206 "v0", "v1", "v2", "v3", "v4", "v5", "v6", 207 "v7", "v8", "v9", "v10", "v11", "v12", "v13", 208 "v14", "v15", "v16", "v17", "v18", "v19", "v20", 209 "v21", "v22", "v23", "v24", "v25", "v26", "v27", 210 "v28", "v29", "v30", "v31" 211 }; 212 213 static const char * const riscv_excp_names[] = { 214 "misaligned_fetch", 215 "fault_fetch", 216 "illegal_instruction", 217 "breakpoint", 218 "misaligned_load", 219 "fault_load", 220 "misaligned_store", 221 "fault_store", 222 "user_ecall", 223 "supervisor_ecall", 224 "hypervisor_ecall", 225 "machine_ecall", 226 "exec_page_fault", 227 "load_page_fault", 228 "reserved", 229 "store_page_fault", 230 "reserved", 231 "reserved", 232 "reserved", 233 "reserved", 234 "guest_exec_page_fault", 235 "guest_load_page_fault", 236 "reserved", 237 "guest_store_page_fault", 238 }; 239 240 static const char * const riscv_intr_names[] = { 241 "u_software", 242 "s_software", 243 "vs_software", 244 "m_software", 245 "u_timer", 246 "s_timer", 247 "vs_timer", 248 "m_timer", 249 "u_external", 250 "s_external", 251 "vs_external", 252 "m_external", 253 "reserved", 254 "reserved", 255 "reserved", 256 "reserved" 257 }; 258 259 static void riscv_cpu_add_user_properties(Object *obj); 260 261 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) 262 { 263 if (async) { 264 return (cause < ARRAY_SIZE(riscv_intr_names)) ? 265 riscv_intr_names[cause] : "(unknown)"; 266 } else { 267 return (cause < ARRAY_SIZE(riscv_excp_names)) ? 268 riscv_excp_names[cause] : "(unknown)"; 269 } 270 } 271 272 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) 273 { 274 env->misa_mxl_max = env->misa_mxl = mxl; 275 env->misa_ext_mask = env->misa_ext = ext; 276 } 277 278 #ifndef CONFIG_USER_ONLY 279 static uint8_t satp_mode_from_str(const char *satp_mode_str) 280 { 281 if (!strncmp(satp_mode_str, "mbare", 5)) { 282 return VM_1_10_MBARE; 283 } 284 285 if (!strncmp(satp_mode_str, "sv32", 4)) { 286 return VM_1_10_SV32; 287 } 288 289 if (!strncmp(satp_mode_str, "sv39", 4)) { 290 return VM_1_10_SV39; 291 } 292 293 if (!strncmp(satp_mode_str, "sv48", 4)) { 294 return VM_1_10_SV48; 295 } 296 297 if (!strncmp(satp_mode_str, "sv57", 4)) { 298 return VM_1_10_SV57; 299 } 300 301 if (!strncmp(satp_mode_str, "sv64", 4)) { 302 return VM_1_10_SV64; 303 } 304 305 g_assert_not_reached(); 306 } 307 308 uint8_t satp_mode_max_from_map(uint32_t map) 309 { 310 /* map here has at least one bit set, so no problem with clz */ 311 return 31 - __builtin_clz(map); 312 } 313 314 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) 315 { 316 if (is_32_bit) { 317 switch (satp_mode) { 318 case VM_1_10_SV32: 319 return "sv32"; 320 case VM_1_10_MBARE: 321 return "none"; 322 } 323 } else { 324 switch (satp_mode) { 325 case VM_1_10_SV64: 326 return "sv64"; 327 case VM_1_10_SV57: 328 return "sv57"; 329 case VM_1_10_SV48: 330 return "sv48"; 331 case VM_1_10_SV39: 332 return "sv39"; 333 case VM_1_10_MBARE: 334 return "none"; 335 } 336 } 337 338 g_assert_not_reached(); 339 } 340 341 static void set_satp_mode_max_supported(RISCVCPU *cpu, 342 uint8_t satp_mode) 343 { 344 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 345 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; 346 347 for (int i = 0; i <= satp_mode; ++i) { 348 if (valid_vm[i]) { 349 cpu->cfg.satp_mode.supported |= (1 << i); 350 } 351 } 352 } 353 354 /* Set the satp mode to the max supported */ 355 static void set_satp_mode_default_map(RISCVCPU *cpu) 356 { 357 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; 358 } 359 #endif 360 361 static void riscv_any_cpu_init(Object *obj) 362 { 363 RISCVCPU *cpu = RISCV_CPU(obj); 364 CPURISCVState *env = &cpu->env; 365 #if defined(TARGET_RISCV32) 366 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 367 #elif defined(TARGET_RISCV64) 368 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); 369 #endif 370 371 #ifndef CONFIG_USER_ONLY 372 set_satp_mode_max_supported(RISCV_CPU(obj), 373 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? 374 VM_1_10_SV32 : VM_1_10_SV57); 375 #endif 376 377 env->priv_ver = PRIV_VERSION_LATEST; 378 379 /* inherited from parent obj via riscv_cpu_init() */ 380 cpu->cfg.ext_ifencei = true; 381 cpu->cfg.ext_icsr = true; 382 cpu->cfg.mmu = true; 383 cpu->cfg.pmp = true; 384 } 385 386 #if defined(TARGET_RISCV64) 387 static void rv64_base_cpu_init(Object *obj) 388 { 389 CPURISCVState *env = &RISCV_CPU(obj)->env; 390 /* We set this in the realise function */ 391 set_misa(env, MXL_RV64, 0); 392 riscv_cpu_add_user_properties(obj); 393 /* Set latest version of privileged specification */ 394 env->priv_ver = PRIV_VERSION_LATEST; 395 #ifndef CONFIG_USER_ONLY 396 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 397 #endif 398 } 399 400 static void rv64_sifive_u_cpu_init(Object *obj) 401 { 402 RISCVCPU *cpu = RISCV_CPU(obj); 403 CPURISCVState *env = &cpu->env; 404 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 405 env->priv_ver = PRIV_VERSION_1_10_0; 406 #ifndef CONFIG_USER_ONLY 407 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); 408 #endif 409 410 /* inherited from parent obj via riscv_cpu_init() */ 411 cpu->cfg.ext_ifencei = true; 412 cpu->cfg.ext_icsr = true; 413 cpu->cfg.mmu = true; 414 cpu->cfg.pmp = true; 415 } 416 417 static void rv64_sifive_e_cpu_init(Object *obj) 418 { 419 CPURISCVState *env = &RISCV_CPU(obj)->env; 420 RISCVCPU *cpu = RISCV_CPU(obj); 421 422 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); 423 env->priv_ver = PRIV_VERSION_1_10_0; 424 #ifndef CONFIG_USER_ONLY 425 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 426 #endif 427 428 /* inherited from parent obj via riscv_cpu_init() */ 429 cpu->cfg.ext_ifencei = true; 430 cpu->cfg.ext_icsr = true; 431 cpu->cfg.pmp = true; 432 } 433 434 static void rv64_thead_c906_cpu_init(Object *obj) 435 { 436 CPURISCVState *env = &RISCV_CPU(obj)->env; 437 RISCVCPU *cpu = RISCV_CPU(obj); 438 439 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU); 440 env->priv_ver = PRIV_VERSION_1_11_0; 441 442 cpu->cfg.ext_zfa = true; 443 cpu->cfg.ext_zfh = true; 444 cpu->cfg.mmu = true; 445 cpu->cfg.ext_xtheadba = true; 446 cpu->cfg.ext_xtheadbb = true; 447 cpu->cfg.ext_xtheadbs = true; 448 cpu->cfg.ext_xtheadcmo = true; 449 cpu->cfg.ext_xtheadcondmov = true; 450 cpu->cfg.ext_xtheadfmemidx = true; 451 cpu->cfg.ext_xtheadmac = true; 452 cpu->cfg.ext_xtheadmemidx = true; 453 cpu->cfg.ext_xtheadmempair = true; 454 cpu->cfg.ext_xtheadsync = true; 455 456 cpu->cfg.mvendorid = THEAD_VENDOR_ID; 457 #ifndef CONFIG_USER_ONLY 458 set_satp_mode_max_supported(cpu, VM_1_10_SV39); 459 #endif 460 461 /* inherited from parent obj via riscv_cpu_init() */ 462 cpu->cfg.pmp = true; 463 } 464 465 static void rv64_veyron_v1_cpu_init(Object *obj) 466 { 467 CPURISCVState *env = &RISCV_CPU(obj)->env; 468 RISCVCPU *cpu = RISCV_CPU(obj); 469 470 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH); 471 env->priv_ver = PRIV_VERSION_1_12_0; 472 473 /* Enable ISA extensions */ 474 cpu->cfg.mmu = true; 475 cpu->cfg.ext_ifencei = true; 476 cpu->cfg.ext_icsr = true; 477 cpu->cfg.pmp = true; 478 cpu->cfg.ext_icbom = true; 479 cpu->cfg.cbom_blocksize = 64; 480 cpu->cfg.cboz_blocksize = 64; 481 cpu->cfg.ext_icboz = true; 482 cpu->cfg.ext_smaia = true; 483 cpu->cfg.ext_ssaia = true; 484 cpu->cfg.ext_sscofpmf = true; 485 cpu->cfg.ext_sstc = true; 486 cpu->cfg.ext_svinval = true; 487 cpu->cfg.ext_svnapot = true; 488 cpu->cfg.ext_svpbmt = true; 489 cpu->cfg.ext_smstateen = true; 490 cpu->cfg.ext_zba = true; 491 cpu->cfg.ext_zbb = true; 492 cpu->cfg.ext_zbc = true; 493 cpu->cfg.ext_zbs = true; 494 cpu->cfg.ext_XVentanaCondOps = true; 495 496 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; 497 cpu->cfg.marchid = VEYRON_V1_MARCHID; 498 cpu->cfg.mimpid = VEYRON_V1_MIMPID; 499 500 #ifndef CONFIG_USER_ONLY 501 set_satp_mode_max_supported(cpu, VM_1_10_SV48); 502 #endif 503 } 504 505 static void rv128_base_cpu_init(Object *obj) 506 { 507 if (qemu_tcg_mttcg_enabled()) { 508 /* Missing 128-bit aligned atomics */ 509 error_report("128-bit RISC-V currently does not work with Multi " 510 "Threaded TCG. Please use: -accel tcg,thread=single"); 511 exit(EXIT_FAILURE); 512 } 513 CPURISCVState *env = &RISCV_CPU(obj)->env; 514 /* We set this in the realise function */ 515 set_misa(env, MXL_RV128, 0); 516 riscv_cpu_add_user_properties(obj); 517 /* Set latest version of privileged specification */ 518 env->priv_ver = PRIV_VERSION_LATEST; 519 #ifndef CONFIG_USER_ONLY 520 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); 521 #endif 522 } 523 #else 524 static void rv32_base_cpu_init(Object *obj) 525 { 526 CPURISCVState *env = &RISCV_CPU(obj)->env; 527 /* We set this in the realise function */ 528 set_misa(env, MXL_RV32, 0); 529 riscv_cpu_add_user_properties(obj); 530 /* Set latest version of privileged specification */ 531 env->priv_ver = PRIV_VERSION_LATEST; 532 #ifndef CONFIG_USER_ONLY 533 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 534 #endif 535 } 536 537 static void rv32_sifive_u_cpu_init(Object *obj) 538 { 539 RISCVCPU *cpu = RISCV_CPU(obj); 540 CPURISCVState *env = &cpu->env; 541 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); 542 env->priv_ver = PRIV_VERSION_1_10_0; 543 #ifndef CONFIG_USER_ONLY 544 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); 545 #endif 546 547 /* inherited from parent obj via riscv_cpu_init() */ 548 cpu->cfg.ext_ifencei = true; 549 cpu->cfg.ext_icsr = true; 550 cpu->cfg.mmu = true; 551 cpu->cfg.pmp = true; 552 } 553 554 static void rv32_sifive_e_cpu_init(Object *obj) 555 { 556 CPURISCVState *env = &RISCV_CPU(obj)->env; 557 RISCVCPU *cpu = RISCV_CPU(obj); 558 559 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); 560 env->priv_ver = PRIV_VERSION_1_10_0; 561 #ifndef CONFIG_USER_ONLY 562 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 563 #endif 564 565 /* inherited from parent obj via riscv_cpu_init() */ 566 cpu->cfg.ext_ifencei = true; 567 cpu->cfg.ext_icsr = true; 568 cpu->cfg.pmp = true; 569 } 570 571 static void rv32_ibex_cpu_init(Object *obj) 572 { 573 CPURISCVState *env = &RISCV_CPU(obj)->env; 574 RISCVCPU *cpu = RISCV_CPU(obj); 575 576 set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); 577 env->priv_ver = PRIV_VERSION_1_11_0; 578 #ifndef CONFIG_USER_ONLY 579 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 580 #endif 581 cpu->cfg.epmp = true; 582 583 /* inherited from parent obj via riscv_cpu_init() */ 584 cpu->cfg.ext_ifencei = true; 585 cpu->cfg.ext_icsr = true; 586 cpu->cfg.pmp = true; 587 } 588 589 static void rv32_imafcu_nommu_cpu_init(Object *obj) 590 { 591 CPURISCVState *env = &RISCV_CPU(obj)->env; 592 RISCVCPU *cpu = RISCV_CPU(obj); 593 594 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); 595 env->priv_ver = PRIV_VERSION_1_10_0; 596 #ifndef CONFIG_USER_ONLY 597 set_satp_mode_max_supported(cpu, VM_1_10_MBARE); 598 #endif 599 600 /* inherited from parent obj via riscv_cpu_init() */ 601 cpu->cfg.ext_ifencei = true; 602 cpu->cfg.ext_icsr = true; 603 cpu->cfg.pmp = true; 604 } 605 #endif 606 607 #if defined(CONFIG_KVM) 608 static void riscv_host_cpu_init(Object *obj) 609 { 610 CPURISCVState *env = &RISCV_CPU(obj)->env; 611 #if defined(TARGET_RISCV32) 612 set_misa(env, MXL_RV32, 0); 613 #elif defined(TARGET_RISCV64) 614 set_misa(env, MXL_RV64, 0); 615 #endif 616 riscv_cpu_add_user_properties(obj); 617 } 618 #endif /* CONFIG_KVM */ 619 620 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) 621 { 622 ObjectClass *oc; 623 char *typename; 624 char **cpuname; 625 626 cpuname = g_strsplit(cpu_model, ",", 1); 627 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]); 628 oc = object_class_by_name(typename); 629 g_strfreev(cpuname); 630 g_free(typename); 631 if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) || 632 object_class_is_abstract(oc)) { 633 return NULL; 634 } 635 return oc; 636 } 637 638 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) 639 { 640 RISCVCPU *cpu = RISCV_CPU(cs); 641 CPURISCVState *env = &cpu->env; 642 int i, j; 643 uint8_t *p; 644 645 #if !defined(CONFIG_USER_ONLY) 646 if (riscv_has_ext(env, RVH)) { 647 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled); 648 } 649 #endif 650 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); 651 #ifndef CONFIG_USER_ONLY 652 { 653 static const int dump_csrs[] = { 654 CSR_MHARTID, 655 CSR_MSTATUS, 656 CSR_MSTATUSH, 657 /* 658 * CSR_SSTATUS is intentionally omitted here as its value 659 * can be figured out by looking at CSR_MSTATUS 660 */ 661 CSR_HSTATUS, 662 CSR_VSSTATUS, 663 CSR_MIP, 664 CSR_MIE, 665 CSR_MIDELEG, 666 CSR_HIDELEG, 667 CSR_MEDELEG, 668 CSR_HEDELEG, 669 CSR_MTVEC, 670 CSR_STVEC, 671 CSR_VSTVEC, 672 CSR_MEPC, 673 CSR_SEPC, 674 CSR_VSEPC, 675 CSR_MCAUSE, 676 CSR_SCAUSE, 677 CSR_VSCAUSE, 678 CSR_MTVAL, 679 CSR_STVAL, 680 CSR_HTVAL, 681 CSR_MTVAL2, 682 CSR_MSCRATCH, 683 CSR_SSCRATCH, 684 CSR_SATP, 685 CSR_MMTE, 686 CSR_UPMBASE, 687 CSR_UPMMASK, 688 CSR_SPMBASE, 689 CSR_SPMMASK, 690 CSR_MPMBASE, 691 CSR_MPMMASK, 692 }; 693 694 for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { 695 int csrno = dump_csrs[i]; 696 target_ulong val = 0; 697 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 698 699 /* 700 * Rely on the smode, hmode, etc, predicates within csr.c 701 * to do the filtering of the registers that are present. 702 */ 703 if (res == RISCV_EXCP_NONE) { 704 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 705 csr_ops[csrno].name, val); 706 } 707 } 708 } 709 #endif 710 711 for (i = 0; i < 32; i++) { 712 qemu_fprintf(f, " %-8s " TARGET_FMT_lx, 713 riscv_int_regnames[i], env->gpr[i]); 714 if ((i & 3) == 3) { 715 qemu_fprintf(f, "\n"); 716 } 717 } 718 if (flags & CPU_DUMP_FPU) { 719 for (i = 0; i < 32; i++) { 720 qemu_fprintf(f, " %-8s %016" PRIx64, 721 riscv_fpr_regnames[i], env->fpr[i]); 722 if ((i & 3) == 3) { 723 qemu_fprintf(f, "\n"); 724 } 725 } 726 } 727 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { 728 static const int dump_rvv_csrs[] = { 729 CSR_VSTART, 730 CSR_VXSAT, 731 CSR_VXRM, 732 CSR_VCSR, 733 CSR_VL, 734 CSR_VTYPE, 735 CSR_VLENB, 736 }; 737 for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) { 738 int csrno = dump_rvv_csrs[i]; 739 target_ulong val = 0; 740 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); 741 742 /* 743 * Rely on the smode, hmode, etc, predicates within csr.c 744 * to do the filtering of the registers that are present. 745 */ 746 if (res == RISCV_EXCP_NONE) { 747 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", 748 csr_ops[csrno].name, val); 749 } 750 } 751 uint16_t vlenb = cpu->cfg.vlen >> 3; 752 753 for (i = 0; i < 32; i++) { 754 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]); 755 p = (uint8_t *)env->vreg; 756 for (j = vlenb - 1 ; j >= 0; j--) { 757 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j))); 758 } 759 qemu_fprintf(f, "\n"); 760 } 761 } 762 } 763 764 static void riscv_cpu_set_pc(CPUState *cs, vaddr value) 765 { 766 RISCVCPU *cpu = RISCV_CPU(cs); 767 CPURISCVState *env = &cpu->env; 768 769 if (env->xl == MXL_RV32) { 770 env->pc = (int32_t)value; 771 } else { 772 env->pc = value; 773 } 774 } 775 776 static vaddr riscv_cpu_get_pc(CPUState *cs) 777 { 778 RISCVCPU *cpu = RISCV_CPU(cs); 779 CPURISCVState *env = &cpu->env; 780 781 /* Match cpu_get_tb_cpu_state. */ 782 if (env->xl == MXL_RV32) { 783 return env->pc & UINT32_MAX; 784 } 785 return env->pc; 786 } 787 788 static void riscv_cpu_synchronize_from_tb(CPUState *cs, 789 const TranslationBlock *tb) 790 { 791 if (!(tb_cflags(tb) & CF_PCREL)) { 792 RISCVCPU *cpu = RISCV_CPU(cs); 793 CPURISCVState *env = &cpu->env; 794 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 795 796 tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL)); 797 798 if (xl == MXL_RV32) { 799 env->pc = (int32_t) tb->pc; 800 } else { 801 env->pc = tb->pc; 802 } 803 } 804 } 805 806 static bool riscv_cpu_has_work(CPUState *cs) 807 { 808 #ifndef CONFIG_USER_ONLY 809 RISCVCPU *cpu = RISCV_CPU(cs); 810 CPURISCVState *env = &cpu->env; 811 /* 812 * Definition of the WFI instruction requires it to ignore the privilege 813 * mode and delegation registers, but respect individual enables 814 */ 815 return riscv_cpu_all_pending(env) != 0; 816 #else 817 return true; 818 #endif 819 } 820 821 static void riscv_restore_state_to_opc(CPUState *cs, 822 const TranslationBlock *tb, 823 const uint64_t *data) 824 { 825 RISCVCPU *cpu = RISCV_CPU(cs); 826 CPURISCVState *env = &cpu->env; 827 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL); 828 target_ulong pc; 829 830 if (tb_cflags(tb) & CF_PCREL) { 831 pc = (env->pc & TARGET_PAGE_MASK) | data[0]; 832 } else { 833 pc = data[0]; 834 } 835 836 if (xl == MXL_RV32) { 837 env->pc = (int32_t)pc; 838 } else { 839 env->pc = pc; 840 } 841 env->bins = data[1]; 842 } 843 844 static void riscv_cpu_reset_hold(Object *obj) 845 { 846 #ifndef CONFIG_USER_ONLY 847 uint8_t iprio; 848 int i, irq, rdzero; 849 #endif 850 CPUState *cs = CPU(obj); 851 RISCVCPU *cpu = RISCV_CPU(cs); 852 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 853 CPURISCVState *env = &cpu->env; 854 855 if (mcc->parent_phases.hold) { 856 mcc->parent_phases.hold(obj); 857 } 858 #ifndef CONFIG_USER_ONLY 859 env->misa_mxl = env->misa_mxl_max; 860 env->priv = PRV_M; 861 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); 862 if (env->misa_mxl > MXL_RV32) { 863 /* 864 * The reset status of SXL/UXL is undefined, but mstatus is WARL 865 * and we must ensure that the value after init is valid for read. 866 */ 867 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); 868 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); 869 if (riscv_has_ext(env, RVH)) { 870 env->vsstatus = set_field(env->vsstatus, 871 MSTATUS64_SXL, env->misa_mxl); 872 env->vsstatus = set_field(env->vsstatus, 873 MSTATUS64_UXL, env->misa_mxl); 874 env->mstatus_hs = set_field(env->mstatus_hs, 875 MSTATUS64_SXL, env->misa_mxl); 876 env->mstatus_hs = set_field(env->mstatus_hs, 877 MSTATUS64_UXL, env->misa_mxl); 878 } 879 } 880 env->mcause = 0; 881 env->miclaim = MIP_SGEIP; 882 env->pc = env->resetvec; 883 env->bins = 0; 884 env->two_stage_lookup = false; 885 886 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | 887 (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0); 888 env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | 889 (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0); 890 891 /* Initialized default priorities of local interrupts. */ 892 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { 893 iprio = riscv_cpu_default_priority(i); 894 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; 895 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; 896 env->hviprio[i] = 0; 897 } 898 i = 0; 899 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { 900 if (!rdzero) { 901 env->hviprio[irq] = env->miprio[irq]; 902 } 903 i++; 904 } 905 /* mmte is supposed to have pm.current hardwired to 1 */ 906 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); 907 #endif 908 env->xl = riscv_cpu_mxl(env); 909 riscv_cpu_update_mask(env); 910 cs->exception_index = RISCV_EXCP_NONE; 911 env->load_res = -1; 912 set_default_nan_mode(1, &env->fp_status); 913 914 #ifndef CONFIG_USER_ONLY 915 if (cpu->cfg.debug) { 916 riscv_trigger_init(env); 917 } 918 919 if (kvm_enabled()) { 920 kvm_riscv_reset_vcpu(cpu); 921 } 922 #endif 923 } 924 925 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) 926 { 927 RISCVCPU *cpu = RISCV_CPU(s); 928 CPURISCVState *env = &cpu->env; 929 info->target_info = &cpu->cfg; 930 931 switch (env->xl) { 932 case MXL_RV32: 933 info->print_insn = print_insn_riscv32; 934 break; 935 case MXL_RV64: 936 info->print_insn = print_insn_riscv64; 937 break; 938 case MXL_RV128: 939 info->print_insn = print_insn_riscv128; 940 break; 941 default: 942 g_assert_not_reached(); 943 } 944 } 945 946 static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg, 947 Error **errp) 948 { 949 int vext_version = VEXT_VERSION_1_00_0; 950 951 if (!is_power_of_2(cfg->vlen)) { 952 error_setg(errp, "Vector extension VLEN must be power of 2"); 953 return; 954 } 955 if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) { 956 error_setg(errp, 957 "Vector extension implementation only supports VLEN " 958 "in the range [128, %d]", RV_VLEN_MAX); 959 return; 960 } 961 if (!is_power_of_2(cfg->elen)) { 962 error_setg(errp, "Vector extension ELEN must be power of 2"); 963 return; 964 } 965 if (cfg->elen > 64 || cfg->elen < 8) { 966 error_setg(errp, 967 "Vector extension implementation only supports ELEN " 968 "in the range [8, 64]"); 969 return; 970 } 971 if (cfg->vext_spec) { 972 if (!g_strcmp0(cfg->vext_spec, "v1.0")) { 973 vext_version = VEXT_VERSION_1_00_0; 974 } else { 975 error_setg(errp, "Unsupported vector spec version '%s'", 976 cfg->vext_spec); 977 return; 978 } 979 } else { 980 qemu_log("vector version is not specified, " 981 "use the default value v1.0\n"); 982 } 983 env->vext_ver = vext_version; 984 } 985 986 static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp) 987 { 988 CPURISCVState *env = &cpu->env; 989 int priv_version = -1; 990 991 if (cpu->cfg.priv_spec) { 992 if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) { 993 priv_version = PRIV_VERSION_1_12_0; 994 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { 995 priv_version = PRIV_VERSION_1_11_0; 996 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { 997 priv_version = PRIV_VERSION_1_10_0; 998 } else { 999 error_setg(errp, 1000 "Unsupported privilege spec version '%s'", 1001 cpu->cfg.priv_spec); 1002 return; 1003 } 1004 1005 env->priv_ver = priv_version; 1006 } 1007 } 1008 1009 static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) 1010 { 1011 CPURISCVState *env = &cpu->env; 1012 int i; 1013 1014 /* Force disable extensions if priv spec version does not match */ 1015 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { 1016 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) && 1017 (env->priv_ver < isa_edata_arr[i].min_version)) { 1018 isa_ext_update_enabled(cpu, &isa_edata_arr[i], false); 1019 #ifndef CONFIG_USER_ONLY 1020 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx 1021 " because privilege spec version does not match", 1022 isa_edata_arr[i].name, env->mhartid); 1023 #else 1024 warn_report("disabling %s extension because " 1025 "privilege spec version does not match", 1026 isa_edata_arr[i].name); 1027 #endif 1028 } 1029 } 1030 } 1031 1032 static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp) 1033 { 1034 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); 1035 CPUClass *cc = CPU_CLASS(mcc); 1036 CPURISCVState *env = &cpu->env; 1037 1038 /* Validate that MISA_MXL is set properly. */ 1039 switch (env->misa_mxl_max) { 1040 #ifdef TARGET_RISCV64 1041 case MXL_RV64: 1042 case MXL_RV128: 1043 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; 1044 break; 1045 #endif 1046 case MXL_RV32: 1047 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml"; 1048 break; 1049 default: 1050 g_assert_not_reached(); 1051 } 1052 1053 if (env->misa_mxl_max != env->misa_mxl) { 1054 error_setg(errp, "misa_mxl_max must be equal to misa_mxl"); 1055 return; 1056 } 1057 } 1058 1059 /* 1060 * Check consistency between chosen extensions while setting 1061 * cpu->cfg accordingly. 1062 */ 1063 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) 1064 { 1065 CPURISCVState *env = &cpu->env; 1066 Error *local_err = NULL; 1067 1068 /* Do some ISA extension error checking */ 1069 if (riscv_has_ext(env, RVG) && 1070 !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) && 1071 riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) && 1072 riscv_has_ext(env, RVD) && 1073 cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) { 1074 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei"); 1075 cpu->cfg.ext_icsr = true; 1076 cpu->cfg.ext_ifencei = true; 1077 1078 env->misa_ext |= RVI | RVM | RVA | RVF | RVD; 1079 env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD; 1080 } 1081 1082 if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { 1083 error_setg(errp, 1084 "I and E extensions are incompatible"); 1085 return; 1086 } 1087 1088 if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) { 1089 error_setg(errp, 1090 "Either I or E extension must be set"); 1091 return; 1092 } 1093 1094 if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) { 1095 error_setg(errp, 1096 "Setting S extension without U extension is illegal"); 1097 return; 1098 } 1099 1100 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) { 1101 error_setg(errp, 1102 "H depends on an I base integer ISA with 32 x registers"); 1103 return; 1104 } 1105 1106 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) { 1107 error_setg(errp, "H extension implicitly requires S-mode"); 1108 return; 1109 } 1110 1111 if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) { 1112 error_setg(errp, "F extension requires Zicsr"); 1113 return; 1114 } 1115 1116 if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) { 1117 error_setg(errp, "Zawrs extension requires A extension"); 1118 return; 1119 } 1120 1121 if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) { 1122 error_setg(errp, "Zfa extension requires F extension"); 1123 return; 1124 } 1125 1126 if (cpu->cfg.ext_zfh) { 1127 cpu->cfg.ext_zfhmin = true; 1128 } 1129 1130 if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) { 1131 error_setg(errp, "Zfh/Zfhmin extensions require F extension"); 1132 return; 1133 } 1134 1135 if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) { 1136 error_setg(errp, "Zfbfmin extension depends on F extension"); 1137 return; 1138 } 1139 1140 if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) { 1141 error_setg(errp, "D extension requires F extension"); 1142 return; 1143 } 1144 1145 if (riscv_has_ext(env, RVV)) { 1146 riscv_cpu_validate_v(env, &cpu->cfg, &local_err); 1147 if (local_err != NULL) { 1148 error_propagate(errp, local_err); 1149 return; 1150 } 1151 1152 /* The V vector extension depends on the Zve64d extension */ 1153 cpu->cfg.ext_zve64d = true; 1154 } 1155 1156 /* The Zve64d extension depends on the Zve64f extension */ 1157 if (cpu->cfg.ext_zve64d) { 1158 cpu->cfg.ext_zve64f = true; 1159 } 1160 1161 /* The Zve64f extension depends on the Zve32f extension */ 1162 if (cpu->cfg.ext_zve64f) { 1163 cpu->cfg.ext_zve32f = true; 1164 } 1165 1166 if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) { 1167 error_setg(errp, "Zve64d/V extensions require D extension"); 1168 return; 1169 } 1170 1171 if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) { 1172 error_setg(errp, "Zve32f/Zve64f extensions require F extension"); 1173 return; 1174 } 1175 1176 if (cpu->cfg.ext_zvfh) { 1177 cpu->cfg.ext_zvfhmin = true; 1178 } 1179 1180 if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) { 1181 error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension"); 1182 return; 1183 } 1184 1185 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { 1186 error_setg(errp, "Zvfh extensions requires Zfhmin extension"); 1187 return; 1188 } 1189 1190 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) { 1191 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension"); 1192 return; 1193 } 1194 1195 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) { 1196 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension"); 1197 return; 1198 } 1199 1200 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) { 1201 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension"); 1202 return; 1203 } 1204 1205 /* Set the ISA extensions, checks should have happened above */ 1206 if (cpu->cfg.ext_zhinx) { 1207 cpu->cfg.ext_zhinxmin = true; 1208 } 1209 1210 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { 1211 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx"); 1212 return; 1213 } 1214 1215 if (cpu->cfg.ext_zfinx) { 1216 if (!cpu->cfg.ext_icsr) { 1217 error_setg(errp, "Zfinx extension requires Zicsr"); 1218 return; 1219 } 1220 if (riscv_has_ext(env, RVF)) { 1221 error_setg(errp, 1222 "Zfinx cannot be supported together with F extension"); 1223 return; 1224 } 1225 } 1226 1227 if (cpu->cfg.ext_zce) { 1228 cpu->cfg.ext_zca = true; 1229 cpu->cfg.ext_zcb = true; 1230 cpu->cfg.ext_zcmp = true; 1231 cpu->cfg.ext_zcmt = true; 1232 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1233 cpu->cfg.ext_zcf = true; 1234 } 1235 } 1236 1237 /* zca, zcd and zcf has a PRIV 1.12.0 restriction */ 1238 if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { 1239 cpu->cfg.ext_zca = true; 1240 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 1241 cpu->cfg.ext_zcf = true; 1242 } 1243 if (riscv_has_ext(env, RVD)) { 1244 cpu->cfg.ext_zcd = true; 1245 } 1246 } 1247 1248 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { 1249 error_setg(errp, "Zcf extension is only relevant to RV32"); 1250 return; 1251 } 1252 1253 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { 1254 error_setg(errp, "Zcf extension requires F extension"); 1255 return; 1256 } 1257 1258 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { 1259 error_setg(errp, "Zcd extension requires D extension"); 1260 return; 1261 } 1262 1263 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || 1264 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { 1265 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 1266 "extension"); 1267 return; 1268 } 1269 1270 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 1271 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 1272 "Zcd extension"); 1273 return; 1274 } 1275 1276 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) { 1277 error_setg(errp, "Zcmt extension requires Zicsr extension"); 1278 return; 1279 } 1280 1281 /* 1282 * In principle Zve*x would also suffice here, were they supported 1283 * in qemu 1284 */ 1285 if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned || 1286 cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) { 1287 error_setg(errp, 1288 "Vector crypto extensions require V or Zve* extensions"); 1289 return; 1290 } 1291 1292 if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) { 1293 error_setg( 1294 errp, 1295 "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions"); 1296 return; 1297 } 1298 1299 if (cpu->cfg.ext_zk) { 1300 cpu->cfg.ext_zkn = true; 1301 cpu->cfg.ext_zkr = true; 1302 cpu->cfg.ext_zkt = true; 1303 } 1304 1305 if (cpu->cfg.ext_zkn) { 1306 cpu->cfg.ext_zbkb = true; 1307 cpu->cfg.ext_zbkc = true; 1308 cpu->cfg.ext_zbkx = true; 1309 cpu->cfg.ext_zkne = true; 1310 cpu->cfg.ext_zknd = true; 1311 cpu->cfg.ext_zknh = true; 1312 } 1313 1314 if (cpu->cfg.ext_zks) { 1315 cpu->cfg.ext_zbkb = true; 1316 cpu->cfg.ext_zbkc = true; 1317 cpu->cfg.ext_zbkx = true; 1318 cpu->cfg.ext_zksed = true; 1319 cpu->cfg.ext_zksh = true; 1320 } 1321 1322 /* 1323 * Disable isa extensions based on priv spec after we 1324 * validated and set everything we need. 1325 */ 1326 riscv_cpu_disable_priv_spec_isa_exts(cpu); 1327 } 1328 1329 #ifndef CONFIG_USER_ONLY 1330 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) 1331 { 1332 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; 1333 uint8_t satp_mode_map_max; 1334 uint8_t satp_mode_supported_max = 1335 satp_mode_max_from_map(cpu->cfg.satp_mode.supported); 1336 1337 if (cpu->cfg.satp_mode.map == 0) { 1338 if (cpu->cfg.satp_mode.init == 0) { 1339 /* If unset by the user, we fallback to the default satp mode. */ 1340 set_satp_mode_default_map(cpu); 1341 } else { 1342 /* 1343 * Find the lowest level that was disabled and then enable the 1344 * first valid level below which can be found in 1345 * valid_vm_1_10_32/64. 1346 */ 1347 for (int i = 1; i < 16; ++i) { 1348 if ((cpu->cfg.satp_mode.init & (1 << i)) && 1349 (cpu->cfg.satp_mode.supported & (1 << i))) { 1350 for (int j = i - 1; j >= 0; --j) { 1351 if (cpu->cfg.satp_mode.supported & (1 << j)) { 1352 cpu->cfg.satp_mode.map |= (1 << j); 1353 break; 1354 } 1355 } 1356 break; 1357 } 1358 } 1359 } 1360 } 1361 1362 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); 1363 1364 /* Make sure the user asked for a supported configuration (HW and qemu) */ 1365 if (satp_mode_map_max > satp_mode_supported_max) { 1366 error_setg(errp, "satp_mode %s is higher than hw max capability %s", 1367 satp_mode_str(satp_mode_map_max, rv32), 1368 satp_mode_str(satp_mode_supported_max, rv32)); 1369 return; 1370 } 1371 1372 /* 1373 * Make sure the user did not ask for an invalid configuration as per 1374 * the specification. 1375 */ 1376 if (!rv32) { 1377 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1378 if (!(cpu->cfg.satp_mode.map & (1 << i)) && 1379 (cpu->cfg.satp_mode.init & (1 << i)) && 1380 (cpu->cfg.satp_mode.supported & (1 << i))) { 1381 error_setg(errp, "cannot disable %s satp mode if %s " 1382 "is enabled", satp_mode_str(i, false), 1383 satp_mode_str(satp_mode_map_max, false)); 1384 return; 1385 } 1386 } 1387 } 1388 1389 /* Finally expand the map so that all valid modes are set */ 1390 for (int i = satp_mode_map_max - 1; i >= 0; --i) { 1391 if (cpu->cfg.satp_mode.supported & (1 << i)) { 1392 cpu->cfg.satp_mode.map |= (1 << i); 1393 } 1394 } 1395 } 1396 #endif 1397 1398 static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 1399 { 1400 #ifndef CONFIG_USER_ONLY 1401 Error *local_err = NULL; 1402 1403 riscv_cpu_satp_mode_finalize(cpu, &local_err); 1404 if (local_err != NULL) { 1405 error_propagate(errp, local_err); 1406 return; 1407 } 1408 #endif 1409 } 1410 1411 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) 1412 { 1413 if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) { 1414 error_setg(errp, "H extension requires priv spec 1.12.0"); 1415 return; 1416 } 1417 } 1418 1419 static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp) 1420 { 1421 RISCVCPU *cpu = RISCV_CPU(dev); 1422 CPURISCVState *env = &cpu->env; 1423 Error *local_err = NULL; 1424 1425 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) { 1426 error_setg(errp, "'host' CPU is not compatible with TCG acceleration"); 1427 return; 1428 } 1429 1430 riscv_cpu_validate_misa_mxl(cpu, &local_err); 1431 if (local_err != NULL) { 1432 error_propagate(errp, local_err); 1433 return; 1434 } 1435 1436 riscv_cpu_validate_priv_spec(cpu, &local_err); 1437 if (local_err != NULL) { 1438 error_propagate(errp, local_err); 1439 return; 1440 } 1441 1442 riscv_cpu_validate_misa_priv(env, &local_err); 1443 if (local_err != NULL) { 1444 error_propagate(errp, local_err); 1445 return; 1446 } 1447 1448 if (cpu->cfg.epmp && !cpu->cfg.pmp) { 1449 /* 1450 * Enhanced PMP should only be available 1451 * on harts with PMP support 1452 */ 1453 error_setg(errp, "Invalid configuration: EPMP requires PMP support"); 1454 return; 1455 } 1456 1457 riscv_cpu_validate_set_extensions(cpu, &local_err); 1458 if (local_err != NULL) { 1459 error_propagate(errp, local_err); 1460 return; 1461 } 1462 1463 #ifndef CONFIG_USER_ONLY 1464 CPU(dev)->tcg_cflags |= CF_PCREL; 1465 1466 if (cpu->cfg.ext_sstc) { 1467 riscv_timer_init(cpu); 1468 } 1469 1470 if (cpu->cfg.pmu_num) { 1471 if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) { 1472 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, 1473 riscv_pmu_timer_cb, cpu); 1474 } 1475 } 1476 #endif 1477 } 1478 1479 static void riscv_cpu_realize(DeviceState *dev, Error **errp) 1480 { 1481 CPUState *cs = CPU(dev); 1482 RISCVCPU *cpu = RISCV_CPU(dev); 1483 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); 1484 Error *local_err = NULL; 1485 1486 cpu_exec_realizefn(cs, &local_err); 1487 if (local_err != NULL) { 1488 error_propagate(errp, local_err); 1489 return; 1490 } 1491 1492 if (tcg_enabled()) { 1493 riscv_cpu_realize_tcg(dev, &local_err); 1494 if (local_err != NULL) { 1495 error_propagate(errp, local_err); 1496 return; 1497 } 1498 } 1499 1500 riscv_cpu_finalize_features(cpu, &local_err); 1501 if (local_err != NULL) { 1502 error_propagate(errp, local_err); 1503 return; 1504 } 1505 1506 riscv_cpu_register_gdb_regs_for_features(cs); 1507 1508 qemu_init_vcpu(cs); 1509 cpu_reset(cs); 1510 1511 mcc->parent_realize(dev, errp); 1512 } 1513 1514 #ifndef CONFIG_USER_ONLY 1515 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, 1516 void *opaque, Error **errp) 1517 { 1518 RISCVSATPMap *satp_map = opaque; 1519 uint8_t satp = satp_mode_from_str(name); 1520 bool value; 1521 1522 value = satp_map->map & (1 << satp); 1523 1524 visit_type_bool(v, name, &value, errp); 1525 } 1526 1527 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, 1528 void *opaque, Error **errp) 1529 { 1530 RISCVSATPMap *satp_map = opaque; 1531 uint8_t satp = satp_mode_from_str(name); 1532 bool value; 1533 1534 if (!visit_type_bool(v, name, &value, errp)) { 1535 return; 1536 } 1537 1538 satp_map->map = deposit32(satp_map->map, satp, 1, value); 1539 satp_map->init |= 1 << satp; 1540 } 1541 1542 static void riscv_add_satp_mode_properties(Object *obj) 1543 { 1544 RISCVCPU *cpu = RISCV_CPU(obj); 1545 1546 if (cpu->env.misa_mxl == MXL_RV32) { 1547 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, 1548 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1549 } else { 1550 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, 1551 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1552 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, 1553 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1554 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, 1555 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1556 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, 1557 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); 1558 } 1559 } 1560 1561 static void riscv_cpu_set_irq(void *opaque, int irq, int level) 1562 { 1563 RISCVCPU *cpu = RISCV_CPU(opaque); 1564 CPURISCVState *env = &cpu->env; 1565 1566 if (irq < IRQ_LOCAL_MAX) { 1567 switch (irq) { 1568 case IRQ_U_SOFT: 1569 case IRQ_S_SOFT: 1570 case IRQ_VS_SOFT: 1571 case IRQ_M_SOFT: 1572 case IRQ_U_TIMER: 1573 case IRQ_S_TIMER: 1574 case IRQ_VS_TIMER: 1575 case IRQ_M_TIMER: 1576 case IRQ_U_EXT: 1577 case IRQ_VS_EXT: 1578 case IRQ_M_EXT: 1579 if (kvm_enabled()) { 1580 kvm_riscv_set_irq(cpu, irq, level); 1581 } else { 1582 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level)); 1583 } 1584 break; 1585 case IRQ_S_EXT: 1586 if (kvm_enabled()) { 1587 kvm_riscv_set_irq(cpu, irq, level); 1588 } else { 1589 env->external_seip = level; 1590 riscv_cpu_update_mip(env, 1 << irq, 1591 BOOL_TO_MASK(level | env->software_seip)); 1592 } 1593 break; 1594 default: 1595 g_assert_not_reached(); 1596 } 1597 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { 1598 /* Require H-extension for handling guest local interrupts */ 1599 if (!riscv_has_ext(env, RVH)) { 1600 g_assert_not_reached(); 1601 } 1602 1603 /* Compute bit position in HGEIP CSR */ 1604 irq = irq - IRQ_LOCAL_MAX + 1; 1605 if (env->geilen < irq) { 1606 g_assert_not_reached(); 1607 } 1608 1609 /* Update HGEIP CSR */ 1610 env->hgeip &= ~((target_ulong)1 << irq); 1611 if (level) { 1612 env->hgeip |= (target_ulong)1 << irq; 1613 } 1614 1615 /* Update mip.SGEIP bit */ 1616 riscv_cpu_update_mip(env, MIP_SGEIP, 1617 BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); 1618 } else { 1619 g_assert_not_reached(); 1620 } 1621 } 1622 #endif /* CONFIG_USER_ONLY */ 1623 1624 static void riscv_cpu_init(Object *obj) 1625 { 1626 RISCVCPU *cpu = RISCV_CPU(obj); 1627 1628 cpu_set_cpustate_pointers(cpu); 1629 1630 #ifndef CONFIG_USER_ONLY 1631 qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq, 1632 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); 1633 #endif /* CONFIG_USER_ONLY */ 1634 } 1635 1636 typedef struct RISCVCPUMisaExtConfig { 1637 const char *name; 1638 const char *description; 1639 target_ulong misa_bit; 1640 bool enabled; 1641 } RISCVCPUMisaExtConfig; 1642 1643 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1644 void *opaque, Error **errp) 1645 { 1646 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1647 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1648 RISCVCPU *cpu = RISCV_CPU(obj); 1649 CPURISCVState *env = &cpu->env; 1650 bool value; 1651 1652 if (!visit_type_bool(v, name, &value, errp)) { 1653 return; 1654 } 1655 1656 if (value) { 1657 env->misa_ext |= misa_bit; 1658 env->misa_ext_mask |= misa_bit; 1659 } else { 1660 env->misa_ext &= ~misa_bit; 1661 env->misa_ext_mask &= ~misa_bit; 1662 } 1663 } 1664 1665 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 1666 void *opaque, Error **errp) 1667 { 1668 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 1669 target_ulong misa_bit = misa_ext_cfg->misa_bit; 1670 RISCVCPU *cpu = RISCV_CPU(obj); 1671 CPURISCVState *env = &cpu->env; 1672 bool value; 1673 1674 value = env->misa_ext & misa_bit; 1675 1676 visit_type_bool(v, name, &value, errp); 1677 } 1678 1679 typedef struct misa_ext_info { 1680 const char *name; 1681 const char *description; 1682 } MISAExtInfo; 1683 1684 #define MISA_INFO_IDX(_bit) \ 1685 __builtin_ctz(_bit) 1686 1687 #define MISA_EXT_INFO(_bit, _propname, _descr) \ 1688 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr} 1689 1690 static const MISAExtInfo misa_ext_info_arr[] = { 1691 MISA_EXT_INFO(RVA, "a", "Atomic instructions"), 1692 MISA_EXT_INFO(RVC, "c", "Compressed instructions"), 1693 MISA_EXT_INFO(RVD, "d", "Double-precision float point"), 1694 MISA_EXT_INFO(RVF, "f", "Single-precision float point"), 1695 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"), 1696 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"), 1697 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"), 1698 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"), 1699 MISA_EXT_INFO(RVU, "u", "User-level instructions"), 1700 MISA_EXT_INFO(RVH, "h", "Hypervisor"), 1701 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"), 1702 MISA_EXT_INFO(RVV, "v", "Vector operations"), 1703 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"), 1704 }; 1705 1706 static int riscv_validate_misa_info_idx(uint32_t bit) 1707 { 1708 int idx; 1709 1710 /* 1711 * Our lowest valid input (RVA) is 1 and 1712 * __builtin_ctz() is UB with zero. 1713 */ 1714 g_assert(bit != 0); 1715 idx = MISA_INFO_IDX(bit); 1716 1717 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr)); 1718 return idx; 1719 } 1720 1721 const char *riscv_get_misa_ext_name(uint32_t bit) 1722 { 1723 int idx = riscv_validate_misa_info_idx(bit); 1724 const char *val = misa_ext_info_arr[idx].name; 1725 1726 g_assert(val != NULL); 1727 return val; 1728 } 1729 1730 const char *riscv_get_misa_ext_description(uint32_t bit) 1731 { 1732 int idx = riscv_validate_misa_info_idx(bit); 1733 const char *val = misa_ext_info_arr[idx].description; 1734 1735 g_assert(val != NULL); 1736 return val; 1737 } 1738 1739 #define MISA_CFG(_bit, _enabled) \ 1740 {.misa_bit = _bit, .enabled = _enabled} 1741 1742 static RISCVCPUMisaExtConfig misa_ext_cfgs[] = { 1743 MISA_CFG(RVA, true), 1744 MISA_CFG(RVC, true), 1745 MISA_CFG(RVD, true), 1746 MISA_CFG(RVF, true), 1747 MISA_CFG(RVI, true), 1748 MISA_CFG(RVE, false), 1749 MISA_CFG(RVM, true), 1750 MISA_CFG(RVS, true), 1751 MISA_CFG(RVU, true), 1752 MISA_CFG(RVH, true), 1753 MISA_CFG(RVJ, false), 1754 MISA_CFG(RVV, false), 1755 MISA_CFG(RVG, false), 1756 }; 1757 1758 static void riscv_cpu_add_misa_properties(Object *cpu_obj) 1759 { 1760 int i; 1761 1762 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) { 1763 RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i]; 1764 int bit = misa_cfg->misa_bit; 1765 1766 misa_cfg->name = riscv_get_misa_ext_name(bit); 1767 misa_cfg->description = riscv_get_misa_ext_description(bit); 1768 1769 /* Check if KVM already created the property */ 1770 if (object_property_find(cpu_obj, misa_cfg->name)) { 1771 continue; 1772 } 1773 1774 object_property_add(cpu_obj, misa_cfg->name, "bool", 1775 cpu_get_misa_ext_cfg, 1776 cpu_set_misa_ext_cfg, 1777 NULL, (void *)misa_cfg); 1778 object_property_set_description(cpu_obj, misa_cfg->name, 1779 misa_cfg->description); 1780 object_property_set_bool(cpu_obj, misa_cfg->name, 1781 misa_cfg->enabled, NULL); 1782 } 1783 } 1784 1785 static Property riscv_cpu_extensions[] = { 1786 /* Defaults for standard extensions */ 1787 DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16), 1788 DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false), 1789 DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true), 1790 DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true), 1791 DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true), 1792 DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true), 1793 DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true), 1794 DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false), 1795 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false), 1796 DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false), 1797 DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false), 1798 DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false), 1799 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), 1800 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), 1801 DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true), 1802 1803 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), 1804 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), 1805 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), 1806 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), 1807 1808 DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false), 1809 DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true), 1810 DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false), 1811 DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false), 1812 DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false), 1813 1814 DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true), 1815 DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true), 1816 DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true), 1817 DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false), 1818 DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false), 1819 DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false), 1820 DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true), 1821 DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false), 1822 DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false), 1823 DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false), 1824 DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false), 1825 DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false), 1826 DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false), 1827 DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false), 1828 DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false), 1829 DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false), 1830 DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false), 1831 1832 DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false), 1833 DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false), 1834 DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false), 1835 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false), 1836 1837 DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true), 1838 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), 1839 DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true), 1840 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), 1841 1842 DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false), 1843 1844 DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false), 1845 DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false), 1846 DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false), 1847 DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false), 1848 DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false), 1849 DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false), 1850 DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false), 1851 1852 /* Vendor-specific custom extensions */ 1853 DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false), 1854 DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false), 1855 DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false), 1856 DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false), 1857 DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false), 1858 DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false), 1859 DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false), 1860 DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false), 1861 DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false), 1862 DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false), 1863 DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false), 1864 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false), 1865 1866 /* These are experimental so mark with 'x-' */ 1867 DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false), 1868 1869 /* ePMP 0.9.3 */ 1870 DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), 1871 DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false), 1872 DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false), 1873 1874 DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false), 1875 DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false), 1876 1877 DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false), 1878 DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false), 1879 DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false), 1880 1881 /* Vector cryptography extensions */ 1882 DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false), 1883 DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false), 1884 DEFINE_PROP_BOOL("x-zvkg", RISCVCPU, cfg.ext_zvkg, false), 1885 DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false), 1886 DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false), 1887 DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false), 1888 DEFINE_PROP_BOOL("x-zvksh", RISCVCPU, cfg.ext_zvksh, false), 1889 1890 DEFINE_PROP_END_OF_LIST(), 1891 }; 1892 1893 1894 #ifndef CONFIG_USER_ONLY 1895 static void cpu_set_cfg_unavailable(Object *obj, Visitor *v, 1896 const char *name, 1897 void *opaque, Error **errp) 1898 { 1899 const char *propname = opaque; 1900 bool value; 1901 1902 if (!visit_type_bool(v, name, &value, errp)) { 1903 return; 1904 } 1905 1906 if (value) { 1907 error_setg(errp, "extension %s is not available with KVM", 1908 propname); 1909 } 1910 } 1911 #endif 1912 1913 /* 1914 * Add CPU properties with user-facing flags. 1915 * 1916 * This will overwrite existing env->misa_ext values with the 1917 * defaults set via riscv_cpu_add_misa_properties(). 1918 */ 1919 static void riscv_cpu_add_user_properties(Object *obj) 1920 { 1921 Property *prop; 1922 DeviceState *dev = DEVICE(obj); 1923 1924 #ifndef CONFIG_USER_ONLY 1925 riscv_add_satp_mode_properties(obj); 1926 1927 if (kvm_enabled()) { 1928 kvm_riscv_init_user_properties(obj); 1929 } 1930 #endif 1931 1932 riscv_cpu_add_misa_properties(obj); 1933 1934 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { 1935 #ifndef CONFIG_USER_ONLY 1936 if (kvm_enabled()) { 1937 /* Check if KVM created the property already */ 1938 if (object_property_find(obj, prop->name)) { 1939 continue; 1940 } 1941 1942 /* 1943 * Set the default to disabled for every extension 1944 * unknown to KVM and error out if the user attempts 1945 * to enable any of them. 1946 * 1947 * We're giving a pass for non-bool properties since they're 1948 * not related to the availability of extensions and can be 1949 * safely ignored as is. 1950 */ 1951 if (prop->info == &qdev_prop_bool) { 1952 object_property_add(obj, prop->name, "bool", 1953 NULL, cpu_set_cfg_unavailable, 1954 NULL, (void *)prop->name); 1955 continue; 1956 } 1957 } 1958 #endif 1959 qdev_property_add_static(dev, prop); 1960 } 1961 } 1962 1963 static Property riscv_cpu_properties[] = { 1964 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), 1965 1966 #ifndef CONFIG_USER_ONLY 1967 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC), 1968 #endif 1969 1970 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false), 1971 1972 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), 1973 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), 1974 1975 /* 1976 * write_misa() is marked as experimental for now so mark 1977 * it with -x and default to 'false'. 1978 */ 1979 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), 1980 DEFINE_PROP_END_OF_LIST(), 1981 }; 1982 1983 static gchar *riscv_gdb_arch_name(CPUState *cs) 1984 { 1985 RISCVCPU *cpu = RISCV_CPU(cs); 1986 CPURISCVState *env = &cpu->env; 1987 1988 switch (riscv_cpu_mxl(env)) { 1989 case MXL_RV32: 1990 return g_strdup("riscv:rv32"); 1991 case MXL_RV64: 1992 case MXL_RV128: 1993 return g_strdup("riscv:rv64"); 1994 default: 1995 g_assert_not_reached(); 1996 } 1997 } 1998 1999 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) 2000 { 2001 RISCVCPU *cpu = RISCV_CPU(cs); 2002 2003 if (strcmp(xmlname, "riscv-csr.xml") == 0) { 2004 return cpu->dyn_csr_xml; 2005 } else if (strcmp(xmlname, "riscv-vector.xml") == 0) { 2006 return cpu->dyn_vreg_xml; 2007 } 2008 2009 return NULL; 2010 } 2011 2012 #ifndef CONFIG_USER_ONLY 2013 static int64_t riscv_get_arch_id(CPUState *cs) 2014 { 2015 RISCVCPU *cpu = RISCV_CPU(cs); 2016 2017 return cpu->env.mhartid; 2018 } 2019 2020 #include "hw/core/sysemu-cpu-ops.h" 2021 2022 static const struct SysemuCPUOps riscv_sysemu_ops = { 2023 .get_phys_page_debug = riscv_cpu_get_phys_page_debug, 2024 .write_elf64_note = riscv_cpu_write_elf64_note, 2025 .write_elf32_note = riscv_cpu_write_elf32_note, 2026 .legacy_vmsd = &vmstate_riscv_cpu, 2027 }; 2028 #endif 2029 2030 #include "hw/core/tcg-cpu-ops.h" 2031 2032 static const struct TCGCPUOps riscv_tcg_ops = { 2033 .initialize = riscv_translate_init, 2034 .synchronize_from_tb = riscv_cpu_synchronize_from_tb, 2035 .restore_state_to_opc = riscv_restore_state_to_opc, 2036 2037 #ifndef CONFIG_USER_ONLY 2038 .tlb_fill = riscv_cpu_tlb_fill, 2039 .cpu_exec_interrupt = riscv_cpu_exec_interrupt, 2040 .do_interrupt = riscv_cpu_do_interrupt, 2041 .do_transaction_failed = riscv_cpu_do_transaction_failed, 2042 .do_unaligned_access = riscv_cpu_do_unaligned_access, 2043 .debug_excp_handler = riscv_cpu_debug_excp_handler, 2044 .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint, 2045 .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint, 2046 #endif /* !CONFIG_USER_ONLY */ 2047 }; 2048 2049 static bool riscv_cpu_is_dynamic(Object *cpu_obj) 2050 { 2051 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 2052 } 2053 2054 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, 2055 void *opaque, Error **errp) 2056 { 2057 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2058 RISCVCPU *cpu = RISCV_CPU(obj); 2059 uint32_t prev_val = cpu->cfg.mvendorid; 2060 uint32_t value; 2061 2062 if (!visit_type_uint32(v, name, &value, errp)) { 2063 return; 2064 } 2065 2066 if (!dynamic_cpu && prev_val != value) { 2067 error_setg(errp, "Unable to change %s mvendorid (0x%x)", 2068 object_get_typename(obj), prev_val); 2069 return; 2070 } 2071 2072 cpu->cfg.mvendorid = value; 2073 } 2074 2075 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name, 2076 void *opaque, Error **errp) 2077 { 2078 bool value = RISCV_CPU(obj)->cfg.mvendorid; 2079 2080 visit_type_bool(v, name, &value, errp); 2081 } 2082 2083 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, 2084 void *opaque, Error **errp) 2085 { 2086 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2087 RISCVCPU *cpu = RISCV_CPU(obj); 2088 uint64_t prev_val = cpu->cfg.mimpid; 2089 uint64_t value; 2090 2091 if (!visit_type_uint64(v, name, &value, errp)) { 2092 return; 2093 } 2094 2095 if (!dynamic_cpu && prev_val != value) { 2096 error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")", 2097 object_get_typename(obj), prev_val); 2098 return; 2099 } 2100 2101 cpu->cfg.mimpid = value; 2102 } 2103 2104 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name, 2105 void *opaque, Error **errp) 2106 { 2107 bool value = RISCV_CPU(obj)->cfg.mimpid; 2108 2109 visit_type_bool(v, name, &value, errp); 2110 } 2111 2112 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, 2113 void *opaque, Error **errp) 2114 { 2115 bool dynamic_cpu = riscv_cpu_is_dynamic(obj); 2116 RISCVCPU *cpu = RISCV_CPU(obj); 2117 uint64_t prev_val = cpu->cfg.marchid; 2118 uint64_t value, invalid_val; 2119 uint32_t mxlen = 0; 2120 2121 if (!visit_type_uint64(v, name, &value, errp)) { 2122 return; 2123 } 2124 2125 if (!dynamic_cpu && prev_val != value) { 2126 error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")", 2127 object_get_typename(obj), prev_val); 2128 return; 2129 } 2130 2131 switch (riscv_cpu_mxl(&cpu->env)) { 2132 case MXL_RV32: 2133 mxlen = 32; 2134 break; 2135 case MXL_RV64: 2136 case MXL_RV128: 2137 mxlen = 64; 2138 break; 2139 default: 2140 g_assert_not_reached(); 2141 } 2142 2143 invalid_val = 1LL << (mxlen - 1); 2144 2145 if (value == invalid_val) { 2146 error_setg(errp, "Unable to set marchid with MSB (%u) bit set " 2147 "and the remaining bits zero", mxlen); 2148 return; 2149 } 2150 2151 cpu->cfg.marchid = value; 2152 } 2153 2154 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name, 2155 void *opaque, Error **errp) 2156 { 2157 bool value = RISCV_CPU(obj)->cfg.marchid; 2158 2159 visit_type_bool(v, name, &value, errp); 2160 } 2161 2162 static void riscv_cpu_class_init(ObjectClass *c, void *data) 2163 { 2164 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); 2165 CPUClass *cc = CPU_CLASS(c); 2166 DeviceClass *dc = DEVICE_CLASS(c); 2167 ResettableClass *rc = RESETTABLE_CLASS(c); 2168 2169 device_class_set_parent_realize(dc, riscv_cpu_realize, 2170 &mcc->parent_realize); 2171 2172 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL, 2173 &mcc->parent_phases); 2174 2175 cc->class_by_name = riscv_cpu_class_by_name; 2176 cc->has_work = riscv_cpu_has_work; 2177 cc->dump_state = riscv_cpu_dump_state; 2178 cc->set_pc = riscv_cpu_set_pc; 2179 cc->get_pc = riscv_cpu_get_pc; 2180 cc->gdb_read_register = riscv_cpu_gdb_read_register; 2181 cc->gdb_write_register = riscv_cpu_gdb_write_register; 2182 cc->gdb_num_core_regs = 33; 2183 cc->gdb_stop_before_watchpoint = true; 2184 cc->disas_set_info = riscv_cpu_disas_set_info; 2185 #ifndef CONFIG_USER_ONLY 2186 cc->sysemu_ops = &riscv_sysemu_ops; 2187 cc->get_arch_id = riscv_get_arch_id; 2188 #endif 2189 cc->gdb_arch_name = riscv_gdb_arch_name; 2190 cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml; 2191 cc->tcg_ops = &riscv_tcg_ops; 2192 2193 object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid, 2194 cpu_set_mvendorid, NULL, NULL); 2195 2196 object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid, 2197 cpu_set_mimpid, NULL, NULL); 2198 2199 object_class_property_add(c, "marchid", "uint64", cpu_get_marchid, 2200 cpu_set_marchid, NULL, NULL); 2201 2202 device_class_set_props(dc, riscv_cpu_properties); 2203 } 2204 2205 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, 2206 int max_str_len) 2207 { 2208 char *old = *isa_str; 2209 char *new = *isa_str; 2210 int i; 2211 2212 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) { 2213 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) { 2214 new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL); 2215 g_free(old); 2216 old = new; 2217 } 2218 } 2219 2220 *isa_str = new; 2221 } 2222 2223 char *riscv_isa_string(RISCVCPU *cpu) 2224 { 2225 int i; 2226 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts); 2227 char *isa_str = g_new(char, maxlen); 2228 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS); 2229 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) { 2230 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) { 2231 *p++ = qemu_tolower(riscv_single_letter_exts[i]); 2232 } 2233 } 2234 *p = '\0'; 2235 if (!cpu->cfg.short_isa_string) { 2236 riscv_isa_string_ext(cpu, &isa_str, maxlen); 2237 } 2238 return isa_str; 2239 } 2240 2241 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b) 2242 { 2243 ObjectClass *class_a = (ObjectClass *)a; 2244 ObjectClass *class_b = (ObjectClass *)b; 2245 const char *name_a, *name_b; 2246 2247 name_a = object_class_get_name(class_a); 2248 name_b = object_class_get_name(class_b); 2249 return strcmp(name_a, name_b); 2250 } 2251 2252 static void riscv_cpu_list_entry(gpointer data, gpointer user_data) 2253 { 2254 const char *typename = object_class_get_name(OBJECT_CLASS(data)); 2255 int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX); 2256 2257 qemu_printf("%.*s\n", len, typename); 2258 } 2259 2260 void riscv_cpu_list(void) 2261 { 2262 GSList *list; 2263 2264 list = object_class_get_list(TYPE_RISCV_CPU, false); 2265 list = g_slist_sort(list, riscv_cpu_list_compare); 2266 g_slist_foreach(list, riscv_cpu_list_entry, NULL); 2267 g_slist_free(list); 2268 } 2269 2270 #define DEFINE_CPU(type_name, initfn) \ 2271 { \ 2272 .name = type_name, \ 2273 .parent = TYPE_RISCV_CPU, \ 2274 .instance_init = initfn \ 2275 } 2276 2277 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \ 2278 { \ 2279 .name = type_name, \ 2280 .parent = TYPE_RISCV_DYNAMIC_CPU, \ 2281 .instance_init = initfn \ 2282 } 2283 2284 static const TypeInfo riscv_cpu_type_infos[] = { 2285 { 2286 .name = TYPE_RISCV_CPU, 2287 .parent = TYPE_CPU, 2288 .instance_size = sizeof(RISCVCPU), 2289 .instance_align = __alignof__(RISCVCPU), 2290 .instance_init = riscv_cpu_init, 2291 .abstract = true, 2292 .class_size = sizeof(RISCVCPUClass), 2293 .class_init = riscv_cpu_class_init, 2294 }, 2295 { 2296 .name = TYPE_RISCV_DYNAMIC_CPU, 2297 .parent = TYPE_RISCV_CPU, 2298 .abstract = true, 2299 }, 2300 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), 2301 #if defined(CONFIG_KVM) 2302 DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init), 2303 #endif 2304 #if defined(TARGET_RISCV32) 2305 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), 2306 DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), 2307 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init), 2308 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init), 2309 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init), 2310 #elif defined(TARGET_RISCV64) 2311 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), 2312 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), 2313 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), 2314 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), 2315 DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init), 2316 DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init), 2317 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), 2318 #endif 2319 }; 2320 2321 DEFINE_TYPES(riscv_cpu_type_infos) 2322