Diffstat (limited to 'target/riscv/tcg/tcg-cpu.c')
-rw-r--r-- | target/riscv/tcg/tcg-cpu.c | 358
1 file changed, 301 insertions, 57 deletions
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index b8814ab..55fd9e5 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -18,9 +18,10 @@
  */

 #include "qemu/osdep.h"
-#include "exec/exec-all.h"
+#include "exec/translation-block.h"
 #include "tcg-cpu.h"
 #include "cpu.h"
+#include "exec/target_page.h"
 #include "internals.h"
 #include "pmu.h"
 #include "time_helper.h"
@@ -29,11 +30,13 @@
 #include "qemu/accel.h"
 #include "qemu/error-report.h"
 #include "qemu/log.h"
-#include "hw/core/accel-cpu.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/accel-cpu-target.h"
+#include "accel/tcg/cpu-ops.h"
 #include "tcg/tcg.h"
 #ifndef CONFIG_USER_ONLY
 #include "hw/boards.h"
+#include "system/tcg.h"
+#include "exec/icount.h"
 #endif

 /* Hash that stores user set extensions */
@@ -90,6 +93,108 @@ static const char *cpu_priv_ver_to_str(int priv_ver)
     return priv_spec_str;
 }

+static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+    return riscv_env_mmu_index(cpu_env(cs), ifetch);
+}
+
+static TCGTBCPUState riscv_get_tb_cpu_state(CPUState *cs)
+{
+    CPURISCVState *env = cpu_env(cs);
+    RISCVCPU *cpu = env_archcpu(env);
+    RISCVExtStatus fs, vs;
+    uint32_t flags = 0;
+    bool pm_signext = riscv_cpu_virt_mem_enabled(env);
+
+    if (cpu->cfg.ext_zve32x) {
+        /*
+         * If env->vl equals to VLMAX, we can use generic vector operation
+         * expanders (GVEC) to accerlate the vector operations.
+         * However, as LMUL could be a fractional number. The maximum
+         * vector size can be operated might be less than 8 bytes,
+         * which is not supported by GVEC. So we set vl_eq_vlmax flag to true
+         * only when maxsz >= 8 bytes.
+         */
+
+        /* lmul encoded as in DisasContext::lmul */
+        int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
+        uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+        uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
+        uint32_t maxsz = vlmax << vsew;
+        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
+                           (maxsz >= 8);
+        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
+        flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
+        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
+                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
+        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
+        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
+                           FIELD_EX64(env->vtype, VTYPE, VTA));
+        flags = FIELD_DP32(flags, TB_FLAGS, VMA,
+                           FIELD_EX64(env->vtype, VTYPE, VMA));
+        flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
+    } else {
+        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
+    }
+
+    if (cpu_get_fcfien(env)) {
+        /*
+         * For Forward CFI, only the expectation of a lpad at
+         * the start of the block is tracked via env->elp. env->elp
+         * is turned on during jalr translation.
+         */
+        flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp);
+        flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
+    }
+
+    if (cpu_get_bcfien(env)) {
+        flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1);
+    }
+
+#ifdef CONFIG_USER_ONLY
+    fs = EXT_STATUS_DIRTY;
+    vs = EXT_STATUS_DIRTY;
+#else
+    flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
+
+    flags |= riscv_env_mmu_index(env, 0);
+    fs = get_field(env->mstatus, MSTATUS_FS);
+    vs = get_field(env->mstatus, MSTATUS_VS);
+
+    if (env->virt_enabled) {
+        flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
+        /*
+         * Merge DISABLED and !DIRTY states using MIN.
+         * We will set both fields when dirtying.
+         */
+        fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
+        vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
+    }
+
+    /* With Zfinx, floating point is enabled/disabled by Smstateen. */
+    if (!riscv_has_ext(env, RVF)) {
+        fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
+             ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
+    }
+
+    if (cpu->cfg.debug && !icount_enabled()) {
+        flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
+    }
+#endif
+
+    flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
+    flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
+    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
+    flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
+    flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
+    flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
+
+    return (TCGTBCPUState){
+        .pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc,
+        .flags = flags
+    };
+}
+
 static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                           const TranslationBlock *tb)
 {
@@ -129,17 +234,51 @@ static void riscv_restore_state_to_opc(CPUState *cs,
         env->pc = pc;
     }
     env->bins = data[1];
+    env->excp_uw2 = data[2];
 }

-static const TCGCPUOps riscv_tcg_ops = {
+#ifndef CONFIG_USER_ONLY
+static vaddr riscv_pointer_wrap(CPUState *cs, int mmu_idx,
+                                vaddr result, vaddr base)
+{
+    CPURISCVState *env = cpu_env(cs);
+    uint32_t pm_len;
+    bool pm_signext;
+
+    if (cpu_address_xl(env) == MXL_RV32) {
+        return (uint32_t)result;
+    }
+
+    pm_len = riscv_pm_get_pmlen(riscv_pm_get_pmm(env));
+    if (pm_len == 0) {
+        return result;
+    }
+
+    pm_signext = riscv_cpu_virt_mem_enabled(env);
+    if (pm_signext) {
+        return sextract64(result, 0, 64 - pm_len);
+    }
+    return extract64(result, 0, 64 - pm_len);
+}
+#endif
+
+const TCGCPUOps riscv_tcg_ops = {
+    .mttcg_supported = true,
+    .guest_default_memory_order = 0,
+
     .initialize = riscv_translate_init,
+    .translate_code = riscv_translate_code,
+    .get_tb_cpu_state = riscv_get_tb_cpu_state,
     .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
     .restore_state_to_opc = riscv_restore_state_to_opc,
+    .mmu_index = riscv_cpu_mmu_index,

 #ifndef CONFIG_USER_ONLY
     .tlb_fill = riscv_cpu_tlb_fill,
+    .pointer_wrap = riscv_pointer_wrap,
     .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
     .cpu_exec_halt = riscv_cpu_has_work,
+    .cpu_exec_reset = cpu_reset,
     .do_interrupt = riscv_cpu_do_interrupt,
     .do_transaction_failed = riscv_cpu_do_transaction_failed,
     .do_unaligned_access = riscv_cpu_do_unaligned_access,
@@ -203,10 +342,20 @@ static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
      * All other named features are already enabled
      * in riscv_tcg_cpu_instance_init().
      */
-    if (feat_offset == CPU_CFG_OFFSET(ext_zic64b)) {
+    switch (feat_offset) {
+    case CPU_CFG_OFFSET(ext_zic64b):
         cpu->cfg.cbom_blocksize = 64;
         cpu->cfg.cbop_blocksize = 64;
         cpu->cfg.cboz_blocksize = 64;
+        break;
+    case CPU_CFG_OFFSET(ext_sha):
+        if (!cpu_misa_ext_is_user_set(RVH)) {
+            riscv_cpu_write_misa_bit(cpu, RVH, true);
+        }
+        /* fallthrough */
+    case CPU_CFG_OFFSET(ext_ssstateen):
+        cpu->cfg.ext_smstateen = true;
+        break;
     }
 }

@@ -303,6 +452,15 @@ static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
         }

         isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);
+
+        /*
+         * Do not show user warnings for named features that users
+         * can't enable/disable in the command line. See commit
+         * 68c9e54bea for more info.
+         */
+        if (cpu_cfg_offset_is_named_feat(edata->ext_enable_offset)) {
+            continue;
+        }
 #ifndef CONFIG_USER_ONLY
         warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                     " because privilege spec version does not match",
@@ -330,11 +488,16 @@ static void riscv_cpu_update_named_features(RISCVCPU *cpu)
         cpu->cfg.has_priv_1_13 = true;
     }

-    /* zic64b is 1.12 or later */
     cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                           cpu->cfg.cbop_blocksize == 64 &&
-                          cpu->cfg.cboz_blocksize == 64 &&
-                          cpu->cfg.has_priv_1_12;
+                          cpu->cfg.cboz_blocksize == 64;
+
+    cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;
+
+    cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
+                       cpu->cfg.ext_ssstateen;
+
+    cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11;
 }

 static void riscv_cpu_validate_g(RISCVCPU *cpu)
@@ -554,7 +717,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         return;
     }

-    if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
+    if (mcc->def->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
         error_setg(errp, "Zcf extension is only relevant to RV32");
         return;
     }
@@ -618,11 +781,55 @@
         cpu->cfg.ext_zihpm = false;
     }

+    if (cpu->cfg.ext_zicfiss) {
+        if (!cpu->cfg.ext_zicsr) {
+            error_setg(errp, "zicfiss extension requires zicsr extension");
+            return;
+        }
+        if (!riscv_has_ext(env, RVA)) {
+            error_setg(errp, "zicfiss extension requires A extension");
+            return;
+        }
+        if (!riscv_has_ext(env, RVS)) {
+            error_setg(errp, "zicfiss extension requires S");
+            return;
+        }
+        if (!cpu->cfg.ext_zimop) {
+            error_setg(errp, "zicfiss extension requires zimop extension");
+            return;
+        }
+        if (cpu->cfg.ext_zca && !cpu->cfg.ext_zcmop) {
+            error_setg(errp, "zicfiss with zca requires zcmop extension");
+            return;
+        }
+    }
+
     if (!cpu->cfg.ext_zihpm) {
         cpu->cfg.pmu_mask = 0;
         cpu->pmu_avail_ctrs = 0;
     }

+    if (cpu->cfg.ext_zicfilp && !cpu->cfg.ext_zicsr) {
+        error_setg(errp, "zicfilp extension requires zicsr extension");
+        return;
+    }
+
+    if (mcc->def->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) {
+        error_setg(errp, "svukte is not supported for RV32");
+        return;
+    }
+
+    if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
+        (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
+        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
+            cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
+            error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
+            return;
+        }
+        cpu->cfg.ext_smctr = false;
+        cpu->cfg.ext_ssctr = false;
+    }
+
     /*
      * Disable isa extensions based on priv spec after we
      * validated and set everything we need.
@@ -635,8 +842,9 @@ static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                             RISCVCPUProfile *profile,
                                             bool send_warn)
 {
-    int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
+    int satp_max = cpu->cfg.max_satp_mode;

+    assert(satp_max >= 0);
     if (profile->satp_mode > satp_max) {
         if (send_warn) {
             bool is_32bit = riscv_cpu_is_32bit(cpu);
@@ -655,13 +863,29 @@
 }
 #endif

+static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
+                                           RISCVCPUProfile *profile,
+                                           RISCVCPUProfile *parent)
+{
+    const char *parent_name;
+    bool parent_enabled;
+
+    if (!profile->enabled || !parent) {
+        return;
+    }
+
+    parent_name = parent->name;
+    parent_enabled = object_property_get_bool(OBJECT(cpu), parent_name, NULL);
+    profile->enabled = parent_enabled;
+}
+
 static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                        RISCVCPUProfile *profile)
 {
     CPURISCVState *env = &cpu->env;
     const char *warn_msg = "Profile %s mandates disabled extension %s";
     bool send_warn = profile->user_set && profile->enabled;
-    bool parent_enabled, profile_impl = true;
+    bool profile_impl = true;
     int i;

 #ifndef CONFIG_USER_ONLY
@@ -672,7 +896,7 @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu,
 #endif

     if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
-        profile->priv_spec != env->priv_ver) {
+        profile->priv_spec > env->priv_ver) {
         profile_impl = false;

         if (send_warn) {
@@ -715,12 +939,8 @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu,

     profile->enabled = profile_impl;

-    if (profile->parent != NULL) {
-        parent_enabled = object_property_get_bool(OBJECT(cpu),
-                                                  profile->parent->name,
-                                                  NULL);
-        profile->enabled = profile->enabled && parent_enabled;
-    }
+    riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
+    riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
 }

 static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
@@ -778,11 +998,18 @@ static void cpu_enable_implied_rule(RISCVCPU *cpu,
     if (!enabled) {
         /* Enable the implied MISAs. */
         if (rule->implied_misa_exts) {
-            riscv_cpu_set_misa_ext(env,
-                                   env->misa_ext | rule->implied_misa_exts);
-
             for (i = 0; misa_bits[i] != 0; i++) {
                 if (rule->implied_misa_exts & misa_bits[i]) {
+                    /*
+                     * If the user disabled the misa_bit do not re-enable it
+                     * and do not apply any implied rules related to it.
+                     */
+                    if (cpu_misa_ext_is_user_set(misa_bits[i]) &&
+                        !(env->misa_ext & misa_bits[i])) {
+                        continue;
+                    }
+
+                    riscv_cpu_set_misa_ext(env, env->misa_ext | misa_bits[i]);
                     ir = g_hash_table_lookup(misa_ext_implied_rules,
                                              GUINT_TO_POINTER(misa_bits[i]));

@@ -825,7 +1052,7 @@ static void cpu_enable_zc_implied_rules(RISCVCPU *cpu)
         cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
         cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);

-        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
+        if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
             cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
         }
     }
@@ -834,7 +1061,7 @@
     if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
         cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);

-        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
+        if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) {
             cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
         }

@@ -898,6 +1125,20 @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
         error_propagate(errp, local_err);
         return;
     }
+#ifndef CONFIG_USER_ONLY
+    if (cpu->cfg.pmu_mask) {
+        riscv_pmu_init(cpu, &local_err);
+        if (local_err != NULL) {
+            error_propagate(errp, local_err);
+            return;
+        }
+
+        if (cpu->cfg.ext_sscofpmf) {
+            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                          riscv_pmu_timer_cb, cpu);
+        }
+    }
+#endif
 }

 void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
@@ -944,8 +1185,17 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
     }

 #ifndef CONFIG_USER_ONLY
+    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
+
+    if (mcc->def->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) {
+        /* Missing 128-bit aligned atomics */
+        error_setg(errp,
+                   "128-bit RISC-V currently does not work with Multi "
+                   "Threaded TCG. Please use: -accel tcg,thread=single");
+        return false;
+    }
+
     CPURISCVState *env = &cpu->env;
-    Error *local_err = NULL;

     tcg_cflags_set(CPU(cs), CF_PCREL);

@@ -953,19 +1203,6 @@
         riscv_timer_init(cpu);
     }
-    if (cpu->cfg.pmu_mask) {
-        riscv_pmu_init(cpu, &local_err);
-        if (local_err != NULL) {
-            error_propagate(errp, local_err);
-            return false;
-        }
-
-        if (cpu->cfg.ext_sscofpmf) {
-            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
-                                          riscv_pmu_timer_cb, cpu);
-        }
-    }
-
     /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
     if (riscv_has_ext(env, RVH)) {
         env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
     }
@@ -1050,7 +1287,6 @@ static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
     MISA_CFG(RVS, true),
     MISA_CFG(RVU, true),
     MISA_CFG(RVH, true),
-    MISA_CFG(RVJ, false),
     MISA_CFG(RVV, false),
     MISA_CFG(RVG, false),
     MISA_CFG(RVB, false),
@@ -1117,8 +1353,13 @@ static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
     profile->user_set = true;
     profile->enabled = value;

-    if (profile->parent != NULL) {
-        object_property_set_bool(obj, profile->parent->name,
+    if (profile->u_parent != NULL) {
+        object_property_set_bool(obj, profile->u_parent->name,
+                                 profile->enabled, NULL);
+    }
+
+    if (profile->s_parent != NULL) {
+        object_property_set_bool(obj, profile->s_parent->name,
                                  profile->enabled, NULL);
     }

@@ -1337,8 +1578,8 @@ static void riscv_init_max_cpu_extensions(Object *obj)
     CPURISCVState *env = &cpu->env;
     const RISCVCPUMultiExtConfig *prop;

-    /* Enable RVG, RVJ and RVV that are disabled by default */
-    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV);
+    /* Enable RVG and RVV that are disabled by default */
+    riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);

     for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
         isa_ext_update_enabled(cpu, prop->offset, true);
@@ -1366,6 +1607,23 @@
     if (env->misa_mxl != MXL_RV32) {
         isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
     }
+
+    /*
+     * TODO: ext_smrnmi requires OpenSBI changes that our current
+     * image does not have. Disable it for now.
+     */
+    if (cpu->cfg.ext_smrnmi) {
+        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
+    }
+
+    /*
+     * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup
+     * to avoid generating a double trap. OpenSBI does not currently support it,
+     * disable it for now.
+     */
+    if (cpu->cfg.ext_smdbltrp) {
+        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
+    }
 }

 static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
@@ -1396,24 +1654,10 @@ static void riscv_tcg_cpu_instance_init(CPUState *cs)
     }
 }

-static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
-{
-    /*
-     * All cpus use the same set of operations.
-     */
-    cc->tcg_ops = &riscv_tcg_ops;
-}
-
-static void riscv_tcg_cpu_class_init(CPUClass *cc)
-{
-    cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
-}
-
-static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
+static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, const void *data)
 {
     AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

-    acc->cpu_class_init = riscv_tcg_cpu_class_init;
     acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
     acc->cpu_target_realize = riscv_tcg_cpu_realize;
 }
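
For reference, the pointer-masking arithmetic used by the new riscv_pointer_wrap hook boils down to either clearing or sign-extending the top PMLEN bits of an address. Below is a minimal standalone sketch (plain C, no QEMU dependencies): zext_low and sext_low are illustrative stand-ins for the extract64()/sextract64() calls in the hunk above, and the PMLEN value and sample address are assumptions made up for the example.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for QEMU's extract64()/sextract64() helpers. */
static uint64_t zext_low(uint64_t value, unsigned bits)
{
    /* Keep the low 'bits' bits, clear the rest. */
    return value & ((UINT64_C(1) << bits) - 1);
}

static uint64_t sext_low(uint64_t value, unsigned bits)
{
    /* Keep the low 'bits' bits, sign-extend from bit (bits - 1). */
    unsigned shift = 64 - bits;
    return (uint64_t)((int64_t)(value << shift) >> shift);
}

int main(void)
{
    uint64_t addr = 0xffffffff12345678ULL; /* top bits hold a pointer-masking tag */
    unsigned pm_len = 16;                  /* assumed PMLEN: 16 tag bits */

    /* Bare accesses: the tag bits are simply cleared (extract64 in the hunk). */
    printf("zero-extended: 0x%016llx\n",
           (unsigned long long)zext_low(addr, 64 - pm_len));

    /* Virtual-memory accesses: the result is sign-extended (sextract64). */
    printf("sign-extended: 0x%016llx\n",
           (unsigned long long)sext_low(addr, 64 - pm_len));
    return 0;
}

With pm_len = 16 the kept field is 48 bits wide, so the sign-extending variant propagates bit 47 into the upper bits, mirroring sextract64(result, 0, 64 - pm_len), while the zero-extending variant mirrors extract64().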