/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"

/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;

/*
 * Return true if the user explicitly set (to either value) the multi-letter
 * extension whose cfg offset is @ext_offset via a CPU property.
 */
static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

/*
 * Re-derive env->pc when execution resumes at @tb.  Only needed when the
 * TB is not position independent; with CF_PCREL the pc is recovered via
 * riscv_restore_state_to_opc() instead.
 */
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            /* RV32: sign-extend the 32-bit pc into target_ulong */
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

/*
 * Restore interrupted CPU state from the per-insn data recorded at
 * translation time: data[0] is the pc (only the in-page offset when
 * CF_PCREL is in use), data[1] is stored to env->bins.
 */
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        /* data[0] only carries the page offset; keep the current page */
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

/* TCG hooks shared by every RISC-V CPU model. */
static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

/*
 * Return the minimum priv spec version required by the extension at
 * @ext_offset.  Aborts if @ext_offset is not found in isa_edata_arr,
 * i.e. callers must only pass offsets of known extensions.
 */
static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

/*
 * Implicitly enable/disable the extension at @ext_offset as a side effect
 * of another setting.  This is a no-op if the value is already @value, if
 * the user explicitly chose a value for this extension (user choice always
 * wins), or when enabling an extension the current priv spec is too old for.
 */
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

/* Validate MISA single-letter extensions against the chosen priv spec. */
static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

/*
 * Validate misa_mxl_max and pick the matching gdb XML description.
 * Changing MXL at runtime is not supported, so misa_mxl must equal
 * misa_mxl_max.
 */
static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}

/*
 * Parse the user-supplied "priv_spec" string property (if any) into
 * env->priv_ver.  Unknown version strings are rejected.
 */
static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}

/*
 * Validate the vector-extension configuration: VLEN/ELEN must be powers
 * of two within their architectural ranges, and the vector spec version
 * (defaulting to v1.0) must be supported.
 */
static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }

    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            env->vext_ver = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else if (env->vext_ver == 0) {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");

        env->vext_ver = VEXT_VERSION_1_00_0;
    }
}

/*
 * Force-disable any enabled extension whose minimum priv spec version is
 * newer than the hart's, warning the user about each one.  zicntr/zihpm
 * are exempt for backward compatibility.
 */
static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
279 */ 280 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) 281 { 282 CPURISCVState *env = &cpu->env; 283 Error *local_err = NULL; 284 285 /* Do some ISA extension error checking */ 286 if (riscv_has_ext(env, RVG) && 287 !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) && 288 riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) && 289 riscv_has_ext(env, RVD) && 290 cpu->cfg.ext_zicsr && cpu->cfg.ext_zifencei)) { 291 292 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr)) && 293 !cpu->cfg.ext_zicsr) { 294 error_setg(errp, "RVG requires Zicsr but user set Zicsr to false"); 295 return; 296 } 297 298 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei)) && 299 !cpu->cfg.ext_zifencei) { 300 error_setg(errp, "RVG requires Zifencei but user set " 301 "Zifencei to false"); 302 return; 303 } 304 305 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zicsr), true); 306 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zifencei), true); 307 308 env->misa_ext |= RVI | RVM | RVA | RVF | RVD; 309 env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD; 310 } 311 312 if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { 313 error_setg(errp, 314 "I and E extensions are incompatible"); 315 return; 316 } 317 318 if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) { 319 error_setg(errp, 320 "Either I or E extension must be set"); 321 return; 322 } 323 324 if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) { 325 error_setg(errp, 326 "Setting S extension without U extension is illegal"); 327 return; 328 } 329 330 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) { 331 error_setg(errp, 332 "H depends on an I base integer ISA with 32 x registers"); 333 return; 334 } 335 336 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) { 337 error_setg(errp, "H extension implicitly requires S-mode"); 338 return; 339 } 340 341 if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) { 342 error_setg(errp, "F extension requires Zicsr"); 343 return; 344 } 345 
346 if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) { 347 error_setg(errp, "Zacas extension requires A extension"); 348 return; 349 } 350 351 if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) { 352 error_setg(errp, "Zawrs extension requires A extension"); 353 return; 354 } 355 356 if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) { 357 error_setg(errp, "Zfa extension requires F extension"); 358 return; 359 } 360 361 if (cpu->cfg.ext_zfh) { 362 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zfhmin), true); 363 } 364 365 if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) { 366 error_setg(errp, "Zfh/Zfhmin extensions require F extension"); 367 return; 368 } 369 370 if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) { 371 error_setg(errp, "Zfbfmin extension depends on F extension"); 372 return; 373 } 374 375 if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) { 376 error_setg(errp, "D extension requires F extension"); 377 return; 378 } 379 380 if (riscv_has_ext(env, RVV)) { 381 riscv_cpu_validate_v(env, &cpu->cfg, &local_err); 382 if (local_err != NULL) { 383 error_propagate(errp, local_err); 384 return; 385 } 386 387 /* The V vector extension depends on the Zve64d extension */ 388 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64d), true); 389 } 390 391 /* The Zve64d extension depends on the Zve64f extension */ 392 if (cpu->cfg.ext_zve64d) { 393 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64f), true); 394 } 395 396 /* The Zve64f extension depends on the Zve32f extension */ 397 if (cpu->cfg.ext_zve64f) { 398 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32f), true); 399 } 400 401 if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) { 402 error_setg(errp, "Zve64d/V extensions require D extension"); 403 return; 404 } 405 406 if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) { 407 error_setg(errp, "Zve32f/Zve64f extensions require F extension"); 408 return; 409 } 410 411 if (cpu->cfg.ext_zvfh) { 412 cpu_cfg_ext_auto_update(cpu, 
CPU_CFG_OFFSET(ext_zvfhmin), true); 413 } 414 415 if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) { 416 error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension"); 417 return; 418 } 419 420 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { 421 error_setg(errp, "Zvfh extensions requires Zfhmin extension"); 422 return; 423 } 424 425 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) { 426 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension"); 427 return; 428 } 429 430 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) { 431 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension"); 432 return; 433 } 434 435 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) { 436 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension"); 437 return; 438 } 439 440 /* Set the ISA extensions, checks should have happened above */ 441 if (cpu->cfg.ext_zhinx) { 442 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); 443 } 444 445 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) { 446 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx"); 447 return; 448 } 449 450 if (cpu->cfg.ext_zfinx) { 451 if (!cpu->cfg.ext_zicsr) { 452 error_setg(errp, "Zfinx extension requires Zicsr"); 453 return; 454 } 455 if (riscv_has_ext(env, RVF)) { 456 error_setg(errp, 457 "Zfinx cannot be supported together with F extension"); 458 return; 459 } 460 } 461 462 if (cpu->cfg.ext_zce) { 463 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); 464 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true); 465 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true); 466 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true); 467 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 468 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); 469 } 470 } 471 472 /* zca, zcd and zcf has a PRIV 1.12.0 restriction */ 473 if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { 474 
cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); 475 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) { 476 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); 477 } 478 if (riscv_has_ext(env, RVD)) { 479 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true); 480 } 481 } 482 483 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { 484 error_setg(errp, "Zcf extension is only relevant to RV32"); 485 return; 486 } 487 488 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) { 489 error_setg(errp, "Zcf extension requires F extension"); 490 return; 491 } 492 493 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) { 494 error_setg(errp, "Zcd extension requires D extension"); 495 return; 496 } 497 498 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb || 499 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) { 500 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca " 501 "extension"); 502 return; 503 } 504 505 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) { 506 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with " 507 "Zcd extension"); 508 return; 509 } 510 511 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) { 512 error_setg(errp, "Zcmt extension requires Zicsr extension"); 513 return; 514 } 515 516 /* 517 * Shorthand vector crypto extensions 518 */ 519 if (cpu->cfg.ext_zvknc) { 520 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true); 521 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true); 522 } 523 524 if (cpu->cfg.ext_zvkng) { 525 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true); 526 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true); 527 } 528 529 if (cpu->cfg.ext_zvkn) { 530 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkned), true); 531 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvknhb), true); 532 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true); 533 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), 
true); 534 } 535 536 if (cpu->cfg.ext_zvksc) { 537 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true); 538 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true); 539 } 540 541 if (cpu->cfg.ext_zvksg) { 542 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true); 543 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true); 544 } 545 546 if (cpu->cfg.ext_zvks) { 547 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksed), true); 548 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksh), true); 549 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true); 550 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true); 551 } 552 553 if (cpu->cfg.ext_zvkt) { 554 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbb), true); 555 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true); 556 } 557 558 /* 559 * In principle Zve*x would also suffice here, were they supported 560 * in qemu 561 */ 562 if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg || 563 cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || 564 cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) { 565 error_setg(errp, 566 "Vector crypto extensions require V or Zve* extensions"); 567 return; 568 } 569 570 if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) { 571 error_setg( 572 errp, 573 "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions"); 574 return; 575 } 576 577 if (cpu->cfg.ext_zk) { 578 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkn), true); 579 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkr), true); 580 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkt), true); 581 } 582 583 if (cpu->cfg.ext_zkn) { 584 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true); 585 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true); 586 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true); 587 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkne), true); 588 cpu_cfg_ext_auto_update(cpu, 
CPU_CFG_OFFSET(ext_zknd), true); 589 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknh), true); 590 } 591 592 if (cpu->cfg.ext_zks) { 593 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true); 594 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true); 595 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true); 596 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksed), true); 597 cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksh), true); 598 } 599 600 if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) { 601 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) { 602 error_setg(errp, "zicntr requires zicsr"); 603 return; 604 } 605 cpu->cfg.ext_zicntr = false; 606 } 607 608 if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) { 609 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) { 610 error_setg(errp, "zihpm requires zicsr"); 611 return; 612 } 613 cpu->cfg.ext_zihpm = false; 614 } 615 616 if (!cpu->cfg.ext_zihpm) { 617 cpu->cfg.pmu_mask = 0; 618 cpu->pmu_avail_ctrs = 0; 619 } 620 621 /* 622 * Disable isa extensions based on priv spec after we 623 * validated and set everything we need. 
624 */ 625 riscv_cpu_disable_priv_spec_isa_exts(cpu); 626 } 627 628 void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp) 629 { 630 CPURISCVState *env = &cpu->env; 631 Error *local_err = NULL; 632 633 riscv_cpu_validate_priv_spec(cpu, &local_err); 634 if (local_err != NULL) { 635 error_propagate(errp, local_err); 636 return; 637 } 638 639 riscv_cpu_validate_misa_priv(env, &local_err); 640 if (local_err != NULL) { 641 error_propagate(errp, local_err); 642 return; 643 } 644 645 if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) { 646 /* 647 * Enhanced PMP should only be available 648 * on harts with PMP support 649 */ 650 error_setg(errp, "Invalid configuration: Smepmp requires PMP support"); 651 return; 652 } 653 654 riscv_cpu_validate_set_extensions(cpu, &local_err); 655 if (local_err != NULL) { 656 error_propagate(errp, local_err); 657 return; 658 } 659 } 660 661 bool riscv_cpu_tcg_compatible(RISCVCPU *cpu) 662 { 663 return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL; 664 } 665 666 static bool riscv_cpu_is_generic(Object *cpu_obj) 667 { 668 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; 669 } 670 671 /* 672 * We'll get here via the following path: 673 * 674 * riscv_cpu_realize() 675 * -> cpu_exec_realizefn() 676 * -> tcg_cpu_realize() (via accel_cpu_common_realize()) 677 */ 678 static bool tcg_cpu_realize(CPUState *cs, Error **errp) 679 { 680 RISCVCPU *cpu = RISCV_CPU(cs); 681 Error *local_err = NULL; 682 683 if (!riscv_cpu_tcg_compatible(cpu)) { 684 g_autofree char *name = riscv_cpu_get_name(cpu); 685 error_setg(errp, "'%s' CPU is not compatible with TCG acceleration", 686 name); 687 return false; 688 } 689 690 riscv_cpu_validate_misa_mxl(cpu, &local_err); 691 if (local_err != NULL) { 692 error_propagate(errp, local_err); 693 return false; 694 } 695 696 #ifndef CONFIG_USER_ONLY 697 CPURISCVState *env = &cpu->env; 698 699 CPU(cs)->tcg_cflags |= CF_PCREL; 700 701 if (cpu->cfg.ext_sstc) { 702 riscv_timer_init(cpu); 703 } 
704 705 if (cpu->cfg.pmu_mask) { 706 riscv_pmu_init(cpu, &local_err); 707 if (local_err != NULL) { 708 error_propagate(errp, local_err); 709 return false; 710 } 711 712 if (cpu->cfg.ext_sscofpmf) { 713 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, 714 riscv_pmu_timer_cb, cpu); 715 } 716 } 717 718 /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */ 719 if (riscv_has_ext(env, RVH)) { 720 env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP; 721 } 722 #endif 723 724 return true; 725 } 726 727 typedef struct RISCVCPUMisaExtConfig { 728 target_ulong misa_bit; 729 bool enabled; 730 } RISCVCPUMisaExtConfig; 731 732 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 733 void *opaque, Error **errp) 734 { 735 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 736 target_ulong misa_bit = misa_ext_cfg->misa_bit; 737 RISCVCPU *cpu = RISCV_CPU(obj); 738 CPURISCVState *env = &cpu->env; 739 bool generic_cpu = riscv_cpu_is_generic(obj); 740 bool prev_val, value; 741 742 if (!visit_type_bool(v, name, &value, errp)) { 743 return; 744 } 745 746 prev_val = env->misa_ext & misa_bit; 747 748 if (value == prev_val) { 749 return; 750 } 751 752 if (value) { 753 if (!generic_cpu) { 754 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 755 error_setg(errp, "'%s' CPU does not allow enabling extensions", 756 cpuname); 757 return; 758 } 759 760 env->misa_ext |= misa_bit; 761 env->misa_ext_mask |= misa_bit; 762 } else { 763 env->misa_ext &= ~misa_bit; 764 env->misa_ext_mask &= ~misa_bit; 765 } 766 } 767 768 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, 769 void *opaque, Error **errp) 770 { 771 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque; 772 target_ulong misa_bit = misa_ext_cfg->misa_bit; 773 RISCVCPU *cpu = RISCV_CPU(obj); 774 CPURISCVState *env = &cpu->env; 775 bool value; 776 777 value = env->misa_ext & misa_bit; 778 779 visit_type_bool(v, name, &value, errp); 780 } 781 782 #define MISA_CFG(_bit, 
_enabled) \ 783 {.misa_bit = _bit, .enabled = _enabled} 784 785 static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = { 786 MISA_CFG(RVA, true), 787 MISA_CFG(RVC, true), 788 MISA_CFG(RVD, true), 789 MISA_CFG(RVF, true), 790 MISA_CFG(RVI, true), 791 MISA_CFG(RVE, false), 792 MISA_CFG(RVM, true), 793 MISA_CFG(RVS, true), 794 MISA_CFG(RVU, true), 795 MISA_CFG(RVH, true), 796 MISA_CFG(RVJ, false), 797 MISA_CFG(RVV, false), 798 MISA_CFG(RVG, false), 799 }; 800 801 /* 802 * We do not support user choice tracking for MISA 803 * extensions yet because, so far, we do not silently 804 * change MISA bits during realize() (RVG enables MISA 805 * bits but the user is warned about it). 806 */ 807 static void riscv_cpu_add_misa_properties(Object *cpu_obj) 808 { 809 bool use_def_vals = riscv_cpu_is_generic(cpu_obj); 810 int i; 811 812 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) { 813 const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i]; 814 int bit = misa_cfg->misa_bit; 815 const char *name = riscv_get_misa_ext_name(bit); 816 const char *desc = riscv_get_misa_ext_description(bit); 817 818 /* Check if KVM already created the property */ 819 if (object_property_find(cpu_obj, name)) { 820 continue; 821 } 822 823 object_property_add(cpu_obj, name, "bool", 824 cpu_get_misa_ext_cfg, 825 cpu_set_misa_ext_cfg, 826 NULL, (void *)misa_cfg); 827 object_property_set_description(cpu_obj, name, desc); 828 if (use_def_vals) { 829 object_property_set_bool(cpu_obj, name, misa_cfg->enabled, NULL); 830 } 831 } 832 } 833 834 static bool cpu_ext_is_deprecated(const char *ext_name) 835 { 836 return isupper(ext_name[0]); 837 } 838 839 /* 840 * String will be allocated in the heap. Caller is responsible 841 * for freeing it. 
842 */ 843 static char *cpu_ext_to_lower(const char *ext_name) 844 { 845 char *ret = g_malloc0(strlen(ext_name) + 1); 846 847 strcpy(ret, ext_name); 848 ret[0] = tolower(ret[0]); 849 850 return ret; 851 } 852 853 static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, 854 void *opaque, Error **errp) 855 { 856 const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque; 857 RISCVCPU *cpu = RISCV_CPU(obj); 858 bool generic_cpu = riscv_cpu_is_generic(obj); 859 bool prev_val, value; 860 861 if (!visit_type_bool(v, name, &value, errp)) { 862 return; 863 } 864 865 if (cpu_ext_is_deprecated(multi_ext_cfg->name)) { 866 g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name); 867 868 warn_report("CPU property '%s' is deprecated. Please use '%s' instead", 869 multi_ext_cfg->name, lower); 870 } 871 872 g_hash_table_insert(multi_ext_user_opts, 873 GUINT_TO_POINTER(multi_ext_cfg->offset), 874 (gpointer)value); 875 876 prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset); 877 878 if (value == prev_val) { 879 return; 880 } 881 882 if (value && !generic_cpu) { 883 g_autofree char *cpuname = riscv_cpu_get_name(cpu); 884 error_setg(errp, "'%s' CPU does not allow enabling extensions", 885 cpuname); 886 return; 887 } 888 889 isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value); 890 } 891 892 static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name, 893 void *opaque, Error **errp) 894 { 895 const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque; 896 bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset); 897 898 visit_type_bool(v, name, &value, errp); 899 } 900 901 static void cpu_add_multi_ext_prop(Object *cpu_obj, 902 const RISCVCPUMultiExtConfig *multi_cfg) 903 { 904 bool generic_cpu = riscv_cpu_is_generic(cpu_obj); 905 bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name); 906 907 object_property_add(cpu_obj, multi_cfg->name, "bool", 908 cpu_get_multi_ext_cfg, 909 cpu_set_multi_ext_cfg, 910 NULL, (void 
*)multi_cfg); 911 912 if (!generic_cpu || deprecated_ext) { 913 return; 914 } 915 916 /* 917 * Set def val directly instead of using 918 * object_property_set_bool() to save the set() 919 * callback hash for user inputs. 920 */ 921 isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset, 922 multi_cfg->enabled); 923 } 924 925 static void riscv_cpu_add_multiext_prop_array(Object *obj, 926 const RISCVCPUMultiExtConfig *array) 927 { 928 const RISCVCPUMultiExtConfig *prop; 929 930 g_assert(array); 931 932 for (prop = array; prop && prop->name; prop++) { 933 cpu_add_multi_ext_prop(obj, prop); 934 } 935 } 936 937 /* 938 * Add CPU properties with user-facing flags. 939 * 940 * This will overwrite existing env->misa_ext values with the 941 * defaults set via riscv_cpu_add_misa_properties(). 942 */ 943 static void riscv_cpu_add_user_properties(Object *obj) 944 { 945 #ifndef CONFIG_USER_ONLY 946 riscv_add_satp_mode_properties(obj); 947 #endif 948 949 riscv_cpu_add_misa_properties(obj); 950 951 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions); 952 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts); 953 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts); 954 955 riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts); 956 957 for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) { 958 qdev_property_add_static(DEVICE(obj), prop); 959 } 960 } 961 962 /* 963 * The 'max' type CPU will have all possible ratified 964 * non-vendor extensions enabled. 
965 */ 966 static void riscv_init_max_cpu_extensions(Object *obj) 967 { 968 RISCVCPU *cpu = RISCV_CPU(obj); 969 CPURISCVState *env = &cpu->env; 970 const RISCVCPUMultiExtConfig *prop; 971 972 /* Enable RVG, RVJ and RVV that are disabled by default */ 973 riscv_cpu_set_misa(env, env->misa_mxl, env->misa_ext | RVG | RVJ | RVV); 974 975 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) { 976 isa_ext_update_enabled(cpu, prop->offset, true); 977 } 978 979 /* set vector version */ 980 env->vext_ver = VEXT_VERSION_1_00_0; 981 982 /* Zfinx is not compatible with F. Disable it */ 983 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false); 984 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false); 985 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false); 986 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false); 987 988 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false); 989 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false); 990 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false); 991 992 if (env->misa_mxl != MXL_RV32) { 993 isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false); 994 } 995 } 996 997 static bool riscv_cpu_has_max_extensions(Object *cpu_obj) 998 { 999 return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL; 1000 } 1001 1002 static void tcg_cpu_instance_init(CPUState *cs) 1003 { 1004 RISCVCPU *cpu = RISCV_CPU(cs); 1005 Object *obj = OBJECT(cpu); 1006 1007 multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); 1008 riscv_cpu_add_user_properties(obj); 1009 1010 if (riscv_cpu_has_max_extensions(obj)) { 1011 riscv_init_max_cpu_extensions(obj); 1012 } 1013 } 1014 1015 static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) 1016 { 1017 /* 1018 * All cpus use the same set of operations. 
1019 */ 1020 cc->tcg_ops = &riscv_tcg_ops; 1021 } 1022 1023 static void tcg_cpu_class_init(CPUClass *cc) 1024 { 1025 cc->init_accel_cpu = tcg_cpu_init_ops; 1026 } 1027 1028 static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data) 1029 { 1030 AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); 1031 1032 acc->cpu_class_init = tcg_cpu_class_init; 1033 acc->cpu_instance_init = tcg_cpu_instance_init; 1034 acc->cpu_target_realize = tcg_cpu_realize; 1035 } 1036 1037 static const TypeInfo tcg_cpu_accel_type_info = { 1038 .name = ACCEL_CPU_NAME("tcg"), 1039 1040 .parent = TYPE_ACCEL_CPU, 1041 .class_init = tcg_cpu_accel_class_init, 1042 .abstract = true, 1043 }; 1044 1045 static void tcg_cpu_accel_register_types(void) 1046 { 1047 type_register_static(&tcg_cpu_accel_type_info); 1048 } 1049 type_init(tcg_cpu_accel_register_types); 1050