Diffstat (limited to 'target/riscv')
35 files changed, 2605 insertions, 1597 deletions
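Most of the churn below comes from one refactor: the per-model instance_init functions in target/riscv/cpu.c are replaced by RISCVCPUDef structures passed as QOM class data and folded into the class in riscv_cpu_class_base_init(), so each child model inherits its parent's definition and overlays only what it changes. Here is a minimal standalone sketch of that inherit-and-overlay step under a reduced stand-in struct — DemoDef, demo_def_inherit() and ATTR_UNUSED are illustrative names, not QEMU API:

#include <glib.h>
#include <stdbool.h>
#include <stdint.h>

#define ATTR_UNUSED (-1)       /* stand-in for RISCV_PROFILE_ATTR_UNUSED */

/* Reduced stand-in for RISCVCPUDef (see the cpu.h hunk below). */
typedef struct {
    uint32_t misa_ext;         /* accumulated: child ORs into parent */
    int priv_spec;             /* overridden only when the child sets it */
    bool bare;                 /* boolean flags only ever turn on */
} DemoDef;

/* Mirrors riscv_cpu_class_base_init(): copy the parent's def, then
 * overlay whatever the child's class data actually specifies. */
static DemoDef *demo_def_inherit(const DemoDef *parent, const DemoDef *child)
{
    DemoDef *def = parent ? g_memdup2(parent, sizeof(*parent))
                          : g_new0(DemoDef, 1);

    if (child) {
        def->misa_ext |= child->misa_ext;
        def->bare |= child->bare;
        if (child->priv_spec != ATTR_UNUSED) {
            def->priv_spec = child->priv_spec;
        }
    }
    return def;
}

This is why, for example, sifive-e31 in the cpu.c hunks below only needs to state .misa_mxl_max = MXL_RV32: everything else comes from the abstract sifive-e parent definition.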
diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c index b99c4a3..e9c8d7f 100644 --- a/target/riscv/bitmanip_helper.c +++ b/target/riscv/bitmanip_helper.c @@ -20,7 +20,7 @@ #include "qemu/osdep.h" #include "qemu/host-utils.h" -#include "exec/exec-all.h" +#include "exec/target_long.h" #include "exec/helper-proto.h" #include "tcg/tcg.h" diff --git a/target/riscv/cpu-param.h b/target/riscv/cpu-param.h index fba30e9..cfdc67c 100644 --- a/target/riscv/cpu-param.h +++ b/target/riscv/cpu-param.h @@ -16,6 +16,14 @@ # define TARGET_VIRT_ADDR_SPACE_BITS 32 /* sv32 */ #endif #define TARGET_PAGE_BITS 12 /* 4 KiB Pages */ + +/* + * RISC-V-specific extra insn start words: + * 1: Original instruction opcode + * 2: more information about instruction + */ +#define TARGET_INSN_START_EXTRA_WORDS 2 + /* * The current MMU Modes are: * - U mode 0b000 @@ -26,6 +34,4 @@ * - M mode HLV/HLVX/HSV 0b111 */ -#define TCG_GUEST_DEFAULT_MO 0 - #endif diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h index 4cfdb74..75f4e43 100644 --- a/target/riscv/cpu-qom.h +++ b/target/riscv/cpu-qom.h @@ -44,15 +44,18 @@ #define TYPE_RISCV_CPU_RVA23S64 RISCV_CPU_TYPE_NAME("rva23s64") #define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex") #define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c") +#define TYPE_RISCV_CPU_SIFIVE_E RISCV_CPU_TYPE_NAME("sifive-e") #define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31") #define TYPE_RISCV_CPU_SIFIVE_E34 RISCV_CPU_TYPE_NAME("sifive-e34") #define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51") +#define TYPE_RISCV_CPU_SIFIVE_U RISCV_CPU_TYPE_NAME("sifive-u") #define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34") #define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54") #define TYPE_RISCV_CPU_THEAD_C906 RISCV_CPU_TYPE_NAME("thead-c906") #define TYPE_RISCV_CPU_VEYRON_V1 RISCV_CPU_TYPE_NAME("veyron-v1") #define TYPE_RISCV_CPU_TT_ASCALON RISCV_CPU_TYPE_NAME("tt-ascalon") #define TYPE_RISCV_CPU_XIANGSHAN_NANHU RISCV_CPU_TYPE_NAME("xiangshan-nanhu") +#define TYPE_RISCV_CPU_XIANGSHAN_KMH RISCV_CPU_TYPE_NAME("xiangshan-kunminghu") #define TYPE_RISCV_CPU_HOST RISCV_CPU_TYPE_NAME("host") OBJECT_DECLARE_CPU_TYPE(RISCVCPU, RISCVCPUClass, RISCV_CPU) diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 09ded68..d055ddf 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -24,7 +24,6 @@ #include "cpu.h" #include "cpu_vendorid.h" #include "internals.h" -#include "exec/exec-all.h" #include "qapi/error.h" #include "qapi/visitor.h" #include "qemu/error-report.h" @@ -74,6 +73,13 @@ bool riscv_cpu_option_set(const char *optname) return g_hash_table_contains(general_user_opts, optname); } +static void riscv_cpu_cfg_merge(RISCVCPUConfig *dest, const RISCVCPUConfig *src) +{ +#define BOOL_FIELD(x) dest->x |= src->x; +#define TYPED_FIELD(type, x, default_) if (src->x != default_) dest->x = src->x; +#include "cpu_cfg_fields.h.inc" +} + #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} @@ -121,8 +127,8 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha), ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), - ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b), ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), + ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b), ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), 
ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), @@ -183,6 +189,7 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt), ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), + ISA_EXT_DATA_ENTRY(sdtrig, PRIV_VERSION_1_12_0, debug), ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12), ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha), ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12), @@ -210,6 +217,7 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm), ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm), ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen), + ISA_EXT_DATA_ENTRY(ssstrict, PRIV_VERSION_1_12_0, has_priv_1_12), ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc), ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12), ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12), @@ -222,6 +230,7 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval), ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot), ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt), + ISA_EXT_DATA_ENTRY(svrsw60t59b, PRIV_VERSION_1_13_0, ext_svrsw60t59b), ISA_EXT_DATA_ENTRY(svukte, PRIV_VERSION_1_13_0, ext_svukte), ISA_EXT_DATA_ENTRY(svvptc, PRIV_VERSION_1_13_0, ext_svvptc), ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba), @@ -357,7 +366,7 @@ void riscv_cpu_set_misa_ext(CPURISCVState *env, uint32_t ext) int riscv_cpu_max_xlen(RISCVCPUClass *mcc) { - return 16 << mcc->misa_mxl_max; + return 16 << mcc->def->misa_mxl_max; } #ifndef CONFIG_USER_ONLY @@ -390,7 +399,7 @@ static uint8_t satp_mode_from_str(const char *satp_mode_str) g_assert_not_reached(); } -uint8_t satp_mode_max_from_map(uint32_t map) +static uint8_t satp_mode_max_from_map(uint32_t map) { /* * 'map = 0' will make us return (31 - 32), which C will @@ -434,17 +443,23 @@ const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit) g_assert_not_reached(); } -static void set_satp_mode_max_supported(RISCVCPU *cpu, - uint8_t satp_mode) +static bool get_satp_mode_supported(RISCVCPU *cpu, uint16_t *supported) { - bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; + bool rv32 = riscv_cpu_is_32bit(cpu); const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; + int satp_mode = cpu->cfg.max_satp_mode; + + if (satp_mode == -1) { + return false; + } + *supported = 0; for (int i = 0; i <= satp_mode; ++i) { if (valid_vm[i]) { - cpu->cfg.satp_mode.supported |= (1 << i); + *supported |= (1 << i); } } + return true; } /* Set the satp mode to the max supported */ @@ -453,382 +468,26 @@ static void set_satp_mode_default_map(RISCVCPU *cpu) /* * Bare CPUs do not default to the max available. * Users must set a valid satp_mode in the command - * line. + * line. Otherwise, leave the existing max_satp_mode + * in place. */ if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) { warn_report("No satp mode set. 
Defaulting to 'bare'"); - cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE); - return; + cpu->cfg.max_satp_mode = VM_1_10_MBARE; } - - cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; -} -#endif - -static void riscv_max_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; - - env->priv_ver = PRIV_VERSION_LATEST; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), - riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ? - VM_1_10_SV32 : VM_1_10_SV57); -#endif -} - -#if defined(TARGET_RISCV64) -static void rv64_base_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; - - /* Set latest version of privileged specification */ - env->priv_ver = PRIV_VERSION_LATEST; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); -#endif -} - -static void rv64_sifive_u_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); - env->priv_ver = PRIV_VERSION_1_10_0; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39); -#endif - - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; } - -static void rv64_sifive_e_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); - env->priv_ver = PRIV_VERSION_1_10_0; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_MBARE); #endif - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.pmp = true; -} - -static void rv64_thead_c906_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU); - env->priv_ver = PRIV_VERSION_1_11_0; - - cpu->cfg.ext_zfa = true; - cpu->cfg.ext_zfh = true; - cpu->cfg.mmu = true; - cpu->cfg.ext_xtheadba = true; - cpu->cfg.ext_xtheadbb = true; - cpu->cfg.ext_xtheadbs = true; - cpu->cfg.ext_xtheadcmo = true; - cpu->cfg.ext_xtheadcondmov = true; - cpu->cfg.ext_xtheadfmemidx = true; - cpu->cfg.ext_xtheadmac = true; - cpu->cfg.ext_xtheadmemidx = true; - cpu->cfg.ext_xtheadmempair = true; - cpu->cfg.ext_xtheadsync = true; - - cpu->cfg.mvendorid = THEAD_VENDOR_ID; #ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_SV39); - th_register_custom_csrs(cpu); -#endif - - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.pmp = true; -} - -static void rv64_veyron_v1_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH); - env->priv_ver = PRIV_VERSION_1_12_0; - - /* Enable ISA extensions */ - cpu->cfg.mmu = true; - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.pmp = true; - cpu->cfg.ext_zicbom = true; - cpu->cfg.cbom_blocksize = 64; - cpu->cfg.cboz_blocksize = 64; - cpu->cfg.ext_zicboz = true; - cpu->cfg.ext_smaia = true; - cpu->cfg.ext_ssaia = true; - cpu->cfg.ext_sscofpmf = true; - cpu->cfg.ext_sstc = true; - cpu->cfg.ext_svinval = true; - cpu->cfg.ext_svnapot = true; - cpu->cfg.ext_svpbmt = true; - cpu->cfg.ext_smstateen = true; - 
cpu->cfg.ext_zba = true; - cpu->cfg.ext_zbb = true; - cpu->cfg.ext_zbc = true; - cpu->cfg.ext_zbs = true; - cpu->cfg.ext_XVentanaCondOps = true; - - cpu->cfg.mvendorid = VEYRON_V1_MVENDORID; - cpu->cfg.marchid = VEYRON_V1_MARCHID; - cpu->cfg.mimpid = VEYRON_V1_MIMPID; - -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_SV48); -#endif -} - -/* Tenstorrent Ascalon */ -static void rv64_tt_ascalon_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVG | RVC | RVS | RVU | RVH | RVV); - env->priv_ver = PRIV_VERSION_1_13_0; - - /* Enable ISA extensions */ - cpu->cfg.mmu = true; - cpu->cfg.vlenb = 256 >> 3; - cpu->cfg.elen = 64; - cpu->env.vext_ver = VEXT_VERSION_1_00_0; - cpu->cfg.rvv_ma_all_1s = true; - cpu->cfg.rvv_ta_all_1s = true; - cpu->cfg.misa_w = true; - cpu->cfg.pmp = true; - cpu->cfg.cbom_blocksize = 64; - cpu->cfg.cbop_blocksize = 64; - cpu->cfg.cboz_blocksize = 64; - cpu->cfg.ext_zic64b = true; - cpu->cfg.ext_zicbom = true; - cpu->cfg.ext_zicbop = true; - cpu->cfg.ext_zicboz = true; - cpu->cfg.ext_zicntr = true; - cpu->cfg.ext_zicond = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zihintntl = true; - cpu->cfg.ext_zihintpause = true; - cpu->cfg.ext_zihpm = true; - cpu->cfg.ext_zimop = true; - cpu->cfg.ext_zawrs = true; - cpu->cfg.ext_zfa = true; - cpu->cfg.ext_zfbfmin = true; - cpu->cfg.ext_zfh = true; - cpu->cfg.ext_zfhmin = true; - cpu->cfg.ext_zcb = true; - cpu->cfg.ext_zcmop = true; - cpu->cfg.ext_zba = true; - cpu->cfg.ext_zbb = true; - cpu->cfg.ext_zbs = true; - cpu->cfg.ext_zkt = true; - cpu->cfg.ext_zvbb = true; - cpu->cfg.ext_zvbc = true; - cpu->cfg.ext_zvfbfmin = true; - cpu->cfg.ext_zvfbfwma = true; - cpu->cfg.ext_zvfh = true; - cpu->cfg.ext_zvfhmin = true; - cpu->cfg.ext_zvkng = true; - cpu->cfg.ext_smaia = true; - cpu->cfg.ext_smstateen = true; - cpu->cfg.ext_ssaia = true; - cpu->cfg.ext_sscofpmf = true; - cpu->cfg.ext_sstc = true; - cpu->cfg.ext_svade = true; - cpu->cfg.ext_svinval = true; - cpu->cfg.ext_svnapot = true; - cpu->cfg.ext_svpbmt = true; - -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_SV57); -#endif -} - -static void rv64_xiangshan_nanhu_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVG | RVC | RVB | RVS | RVU); - env->priv_ver = PRIV_VERSION_1_12_0; - - /* Enable ISA extensions */ - cpu->cfg.ext_zbc = true; - cpu->cfg.ext_zbkb = true; - cpu->cfg.ext_zbkc = true; - cpu->cfg.ext_zbkx = true; - cpu->cfg.ext_zknd = true; - cpu->cfg.ext_zkne = true; - cpu->cfg.ext_zknh = true; - cpu->cfg.ext_zksed = true; - cpu->cfg.ext_zksh = true; - cpu->cfg.ext_svinval = true; - - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; - -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_SV39); -#endif -} - -#ifdef CONFIG_TCG -static void rv128_base_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; - - /* Set latest version of privileged specification */ - env->priv_ver = PRIV_VERSION_LATEST; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); -#endif -} -#endif /* CONFIG_TCG */ - -static void rv64i_bare_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - riscv_cpu_set_misa_ext(env, RVI); -} - -static void rv64e_bare_cpu_init(Object *obj) -{ - CPURISCVState *env = 
&RISCV_CPU(obj)->env; - riscv_cpu_set_misa_ext(env, RVE); -} - -#endif /* !TARGET_RISCV64 */ - -#if defined(TARGET_RISCV32) || \ - (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) - -static void rv32_base_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; - - /* Set latest version of privileged specification */ - env->priv_ver = PRIV_VERSION_LATEST; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); -#endif -} - -static void rv32_sifive_u_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - CPURISCVState *env = &cpu->env; - riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); - env->priv_ver = PRIV_VERSION_1_10_0; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32); -#endif - - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.mmu = true; - cpu->cfg.pmp = true; -} - -static void rv32_sifive_e_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVC | RVU); - env->priv_ver = PRIV_VERSION_1_10_0; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_MBARE); -#endif - - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.pmp = true; -} - -static void rv32_ibex_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVI | RVM | RVC | RVU); - env->priv_ver = PRIV_VERSION_1_12_0; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_MBARE); -#endif - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.pmp = true; - cpu->cfg.ext_smepmp = true; - - cpu->cfg.ext_zba = true; - cpu->cfg.ext_zbb = true; - cpu->cfg.ext_zbc = true; - cpu->cfg.ext_zbs = true; -} - -static void rv32_imafcu_nommu_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - RISCVCPU *cpu = RISCV_CPU(obj); - - riscv_cpu_set_misa_ext(env, RVI | RVM | RVA | RVF | RVC | RVU); - env->priv_ver = PRIV_VERSION_1_10_0; -#ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_MBARE); -#endif - - /* inherited from parent obj via riscv_cpu_init() */ - cpu->cfg.ext_zifencei = true; - cpu->cfg.ext_zicsr = true; - cpu->cfg.pmp = true; -} - -static void rv32i_bare_cpu_init(Object *obj) -{ - CPURISCVState *env = &RISCV_CPU(obj)->env; - riscv_cpu_set_misa_ext(env, RVI); -} - -static void rv32e_bare_cpu_init(Object *obj) +static void riscv_register_custom_csrs(RISCVCPU *cpu, const RISCVCSR *csr_list) { - CPURISCVState *env = &RISCV_CPU(obj)->env; - riscv_cpu_set_misa_ext(env, RVE); + for (size_t i = 0; csr_list[i].csr_ops.name; i++) { + int csrno = csr_list[i].csrno; + const riscv_csr_operations *csr_ops = &csr_list[i].csr_ops; + if (!csr_list[i].insertion_test || csr_list[i].insertion_test(cpu)) { + riscv_set_csr_ops(csrno, csr_ops); + } + } } #endif @@ -1021,11 +680,6 @@ bool riscv_cpu_has_work(CPUState *cs) } #endif /* !CONFIG_USER_ONLY */ -static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch) -{ - return riscv_env_mmu_index(cpu_env(cs), ifetch); -} - static void riscv_cpu_reset_hold(Object *obj, ResetType type) { #ifndef CONFIG_USER_ONLY @@ -1041,7 +695,7 @@ static void 
riscv_cpu_reset_hold(Object *obj, ResetType type) mcc->parent_phases.hold(obj, type); } #ifndef CONFIG_USER_ONLY - env->misa_mxl = mcc->misa_mxl_max; + env->misa_mxl = mcc->def->misa_mxl_max; env->priv = PRV_M; env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); if (env->misa_mxl > MXL_RV32) { @@ -1178,18 +832,16 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) { bool rv32 = riscv_cpu_is_32bit(cpu); - uint8_t satp_mode_map_max, satp_mode_supported_max; + uint16_t supported; + uint8_t satp_mode_map_max; - /* The CPU wants the OS to decide which satp mode to use */ - if (cpu->cfg.satp_mode.supported == 0) { + if (!get_satp_mode_supported(cpu, &supported)) { + /* The CPU wants the hypervisor to decide which satp mode to allow */ return; } - satp_mode_supported_max = - satp_mode_max_from_map(cpu->cfg.satp_mode.supported); - - if (cpu->cfg.satp_mode.map == 0) { - if (cpu->cfg.satp_mode.init == 0) { + if (cpu->satp_modes.map == 0) { + if (cpu->satp_modes.init == 0) { /* If unset by the user, we fallback to the default satp mode. */ set_satp_mode_default_map(cpu); } else { @@ -1199,27 +851,27 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) * valid_vm_1_10_32/64. */ for (int i = 1; i < 16; ++i) { - if ((cpu->cfg.satp_mode.init & (1 << i)) && - (cpu->cfg.satp_mode.supported & (1 << i))) { + if ((cpu->satp_modes.init & (1 << i)) && + supported & (1 << i)) { for (int j = i - 1; j >= 0; --j) { - if (cpu->cfg.satp_mode.supported & (1 << j)) { - cpu->cfg.satp_mode.map |= (1 << j); - break; + if (supported & (1 << j)) { + cpu->cfg.max_satp_mode = j; + return; } } - break; } } } + return; } - satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); + satp_mode_map_max = satp_mode_max_from_map(cpu->satp_modes.map); /* Make sure the user asked for a supported configuration (HW and qemu) */ - if (satp_mode_map_max > satp_mode_supported_max) { + if (satp_mode_map_max > cpu->cfg.max_satp_mode) { error_setg(errp, "satp_mode %s is higher than hw max capability %s", satp_mode_str(satp_mode_map_max, rv32), - satp_mode_str(satp_mode_supported_max, rv32)); + satp_mode_str(cpu->cfg.max_satp_mode, rv32)); return; } @@ -1229,9 +881,9 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) */ if (!rv32) { for (int i = satp_mode_map_max - 1; i >= 0; --i) { - if (!(cpu->cfg.satp_mode.map & (1 << i)) && - (cpu->cfg.satp_mode.init & (1 << i)) && - (cpu->cfg.satp_mode.supported & (1 << i))) { + if (!(cpu->satp_modes.map & (1 << i)) && + (cpu->satp_modes.init & (1 << i)) && + (supported & (1 << i))) { error_setg(errp, "cannot disable %s satp mode if %s " "is enabled", satp_mode_str(i, false), satp_mode_str(satp_mode_map_max, false)); @@ -1240,12 +892,7 @@ static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) } } - /* Finally expand the map so that all valid modes are set */ - for (int i = satp_mode_map_max - 1; i >= 0; --i) { - if (cpu->cfg.satp_mode.supported & (1 << i)) { - cpu->cfg.satp_mode.map |= (1 << i); - } - } + cpu->cfg.max_satp_mode = satp_mode_map_max; } #endif @@ -1323,11 +970,11 @@ bool riscv_cpu_accelerator_compatible(RISCVCPU *cpu) static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - RISCVSATPMap *satp_map = opaque; + RISCVSATPModes *satp_modes = opaque; uint8_t satp = satp_mode_from_str(name); bool value; - value = satp_map->map & (1 << satp); + value = satp_modes->map & (1 << satp); 
visit_type_bool(v, name, &value, errp); } @@ -1335,7 +982,7 @@ static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name, static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - RISCVSATPMap *satp_map = opaque; + RISCVSATPModes *satp_modes = opaque; uint8_t satp = satp_mode_from_str(name); bool value; @@ -1343,8 +990,8 @@ static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name, return; } - satp_map->map = deposit32(satp_map->map, satp, 1, value); - satp_map->init |= 1 << satp; + satp_modes->map = deposit32(satp_modes->map, satp, 1, value); + satp_modes->init |= 1 << satp; } void riscv_add_satp_mode_properties(Object *obj) @@ -1353,16 +1000,16 @@ void riscv_add_satp_mode_properties(Object *obj) if (cpu->env.misa_mxl == MXL_RV32) { object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp, - cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + cpu_riscv_set_satp, NULL, &cpu->satp_modes); } else { object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp, - cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + cpu_riscv_set_satp, NULL, &cpu->satp_modes); object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp, - cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + cpu_riscv_set_satp, NULL, &cpu->satp_modes); object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp, - cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + cpu_riscv_set_satp, NULL, &cpu->satp_modes); object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp, - cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode); + cpu_riscv_set_satp, NULL, &cpu->satp_modes); } } @@ -1439,18 +1086,13 @@ static bool riscv_cpu_is_dynamic(Object *cpu_obj) return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; } -static void riscv_cpu_post_init(Object *obj) -{ - accel_cpu_instance_init(CPU(obj)); -} - static void riscv_cpu_init(Object *obj) { RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(obj); RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; - env->misa_mxl = mcc->misa_mxl_max; + env->misa_mxl = mcc->def->misa_mxl_max; #ifndef CONFIG_USER_ONLY qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq, @@ -1468,8 +1110,8 @@ static void riscv_cpu_init(Object *obj) * for all CPUs. Each accelerator will decide what to do when * users disable them. */ - RISCV_CPU(obj)->cfg.ext_zicntr = true; - RISCV_CPU(obj)->cfg.ext_zihpm = true; + RISCV_CPU(obj)->cfg.ext_zicntr = !mcc->def->bare; + RISCV_CPU(obj)->cfg.ext_zihpm = !mcc->def->bare; /* Default values for non-bool cpu properties */ cpu->cfg.pmu_mask = MAKE_64BIT_MASK(3, 16); @@ -1478,35 +1120,30 @@ static void riscv_cpu_init(Object *obj) cpu->cfg.cbom_blocksize = 64; cpu->cfg.cbop_blocksize = 64; cpu->cfg.cboz_blocksize = 64; + cpu->cfg.pmp_regions = 16; cpu->env.vext_ver = VEXT_VERSION_1_00_0; -} + cpu->cfg.max_satp_mode = -1; -static void riscv_bare_cpu_init(Object *obj) -{ - RISCVCPU *cpu = RISCV_CPU(obj); - - /* - * Bare CPUs do not inherit the timer and performance - * counters from the parent class (see riscv_cpu_init() - * for info on why the parent enables them). - * - * Users have to explicitly enable these counters for - * bare CPUs. 
- */ - cpu->cfg.ext_zicntr = false; - cpu->cfg.ext_zihpm = false; + if (mcc->def->profile) { + mcc->def->profile->enabled = true; + } - /* Set to QEMU's first supported priv version */ - cpu->env.priv_ver = PRIV_VERSION_1_10_0; + env->misa_ext_mask = env->misa_ext = mcc->def->misa_ext; + riscv_cpu_cfg_merge(&cpu->cfg, &mcc->def->cfg); - /* - * Support all available satp_mode settings. The default - * value will be set to MBARE if the user doesn't set - * satp_mode manually (see set_satp_mode_default()). - */ + if (mcc->def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) { + cpu->env.priv_ver = mcc->def->priv_spec; + } + if (mcc->def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) { + cpu->env.vext_ver = mcc->def->vext_spec; + } #ifndef CONFIG_USER_ONLY - set_satp_mode_max_supported(cpu, VM_1_10_SV64); + if (mcc->def->custom_csrs) { + riscv_register_custom_csrs(cpu, mcc->def->custom_csrs); + } #endif + + accel_cpu_instance_init(CPU(obj)); } typedef struct misa_ext_info { @@ -1541,7 +1178,7 @@ static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc) CPUClass *cc = CPU_CLASS(mcc); /* Validate that MISA_MXL is set properly. */ - switch (mcc->misa_mxl_max) { + switch (mcc->def->misa_mxl_max) { #ifdef TARGET_RISCV64 case MXL_RV64: case MXL_RV128: @@ -1649,6 +1286,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false), MULTI_EXT_CFG_BOOL("svnapot", ext_svnapot, false), MULTI_EXT_CFG_BOOL("svpbmt", ext_svpbmt, false), + MULTI_EXT_CFG_BOOL("svrsw60t59b", ext_svrsw60t59b, false), MULTI_EXT_CFG_BOOL("svvptc", ext_svvptc, true), MULTI_EXT_CFG_BOOL("zicntr", ext_zicntr, true), @@ -1742,31 +1380,24 @@ const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { * 'Named features' is the name we give to extensions that we * don't want to expose to users. They are either immutable * (always enabled/disable) or they'll vary depending on - * the resulting CPU state. They have riscv,isa strings - * and priv_ver like regular extensions. + * the resulting CPU state. + * + * Some of them are always enabled depending on priv version + * of the CPU and are declared directly in isa_edata_arr[]. + * The ones listed here have special checks during finalize() + * time and require their own flags like regular extensions. + * See riscv_cpu_update_named_features() for more info. */ const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true), MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true), MULTI_EXT_CFG_BOOL("sha", ext_sha, true), - MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true), - - { }, -}; -/* Deprecated entries marked for future removal */ -const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { - MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), - MULTI_EXT_CFG_BOOL("Zicsr", ext_zicsr, true), - MULTI_EXT_CFG_BOOL("Zihintntl", ext_zihintntl, true), - MULTI_EXT_CFG_BOOL("Zihintpause", ext_zihintpause, true), - MULTI_EXT_CFG_BOOL("Zawrs", ext_zawrs, true), - MULTI_EXT_CFG_BOOL("Zfa", ext_zfa, true), - MULTI_EXT_CFG_BOOL("Zfh", ext_zfh, false), - MULTI_EXT_CFG_BOOL("Zfhmin", ext_zfhmin, false), - MULTI_EXT_CFG_BOOL("Zve32f", ext_zve32f, false), - MULTI_EXT_CFG_BOOL("Zve64f", ext_zve64f, false), - MULTI_EXT_CFG_BOOL("Zve64d", ext_zve64d, false), + /* + * 'ziccrse' has its own flag because the KVM driver + * wants to enable/disable it on its own accord. 
+ */ + MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true), { }, }; @@ -1935,6 +1566,46 @@ static const PropertyInfo prop_pmp = { .set = prop_pmp_set, }; +static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(obj); + uint8_t value; + + visit_type_uint8(v, name, &value, errp); + + if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) { + cpu_set_prop_err(cpu, name, errp); + return; + } + + if (cpu->env.priv_ver < PRIV_VERSION_1_12_0 && value > OLD_MAX_RISCV_PMPS) { + error_setg(errp, "Number of PMP regions exceeds maximum available"); + return; + } else if (value > MAX_RISCV_PMPS) { + error_setg(errp, "Number of PMP regions exceeds maximum available"); + return; + } + + cpu_option_add_user_setting(name, value); + cpu->cfg.pmp_regions = value; +} + +static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint8_t value = RISCV_CPU(obj)->cfg.pmp_regions; + + visit_type_uint8(v, name, &value, errp); +} + +static const PropertyInfo prop_num_pmp_regions = { + .type = "uint8", + .description = "num-pmp-regions", + .get = prop_num_pmp_regions_get, + .set = prop_num_pmp_regions_set, +}; + static int priv_spec_from_str(const char *priv_spec_str) { int priv_version = -1; @@ -2934,6 +2605,7 @@ static const Property riscv_cpu_properties[] = { {.name = "mmu", .info = &prop_mmu}, {.name = "pmp", .info = &prop_pmp}, + {.name = "num-pmp-regions", .info = &prop_num_pmp_regions}, {.name = "priv_spec", .info = &prop_priv_spec}, {.name = "vext_spec", .info = &prop_vext_spec}, @@ -2962,6 +2634,7 @@ static const Property riscv_cpu_properties[] = { DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), DEFINE_PROP_BOOL("rvv_vl_half_avl", RISCVCPU, cfg.rvv_vl_half_avl, false), + DEFINE_PROP_BOOL("rvv_vsetvl_x0_vill", RISCVCPU, cfg.rvv_vsetvl_x0_vill, false), /* * write_misa() is marked as experimental for now so mark @@ -2970,36 +2643,6 @@ static const Property riscv_cpu_properties[] = { DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), }; -#if defined(TARGET_RISCV64) -static void rva22u64_profile_cpu_init(Object *obj) -{ - rv64i_bare_cpu_init(obj); - - RVA22U64.enabled = true; -} - -static void rva22s64_profile_cpu_init(Object *obj) -{ - rv64i_bare_cpu_init(obj); - - RVA22S64.enabled = true; -} - -static void rva23u64_profile_cpu_init(Object *obj) -{ - rv64i_bare_cpu_init(obj); - - RVA23U64.enabled = true; -} - -static void rva23s64_profile_cpu_init(Object *obj) -{ - rv64i_bare_cpu_init(obj); - - RVA23S64.enabled = true; -} -#endif - static const gchar *riscv_gdb_arch_name(CPUState *cs) { RISCVCPU *cpu = RISCV_CPU(cs); @@ -3035,7 +2678,7 @@ static const struct SysemuCPUOps riscv_sysemu_ops = { }; #endif -static void riscv_cpu_common_class_init(ObjectClass *c, void *data) +static void riscv_cpu_common_class_init(ObjectClass *c, const void *data) { RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); CPUClass *cc = CPU_CLASS(c); @@ -3049,7 +2692,6 @@ static void riscv_cpu_common_class_init(ObjectClass *c, void *data) &mcc->parent_phases); cc->class_by_name = riscv_cpu_class_by_name; - cc->mmu_index = riscv_cpu_mmu_index; cc->dump_state = riscv_cpu_dump_state; cc->set_pc = riscv_cpu_set_pc; cc->get_pc = riscv_cpu_get_pc; @@ -3062,16 +2704,94 @@ static void riscv_cpu_common_class_init(ObjectClass *c, void *data) cc->get_arch_id = riscv_get_arch_id; #endif cc->gdb_arch_name = 
riscv_gdb_arch_name; +#ifdef CONFIG_TCG + cc->tcg_ops = &riscv_tcg_ops; +#endif /* CONFIG_TCG */ device_class_set_props(dc, riscv_cpu_properties); } -static void riscv_cpu_class_init(ObjectClass *c, void *data) +static bool profile_extends(RISCVCPUProfile *trial, RISCVCPUProfile *parent) +{ + RISCVCPUProfile *curr; + if (!parent) { + return true; + } + + curr = trial; + while (curr) { + if (curr == parent) { + return true; + } + curr = curr->u_parent; + } + + curr = trial; + while (curr) { + if (curr == parent) { + return true; + } + curr = curr->s_parent; + } + + return false; +} + +static void riscv_cpu_class_base_init(ObjectClass *c, const void *data) { RISCVCPUClass *mcc = RISCV_CPU_CLASS(c); + RISCVCPUClass *pcc = RISCV_CPU_CLASS(object_class_get_parent(c)); + + if (pcc->def) { + mcc->def = g_memdup2(pcc->def, sizeof(*pcc->def)); + } else { + mcc->def = g_new0(RISCVCPUDef, 1); + } + + if (data) { + const RISCVCPUDef *def = data; + mcc->def->bare |= def->bare; + if (def->profile) { + assert(profile_extends(def->profile, mcc->def->profile)); + assert(mcc->def->bare); + mcc->def->profile = def->profile; + } + if (def->misa_mxl_max) { + assert(def->misa_mxl_max <= MXL_RV128); + mcc->def->misa_mxl_max = def->misa_mxl_max; + +#ifndef CONFIG_USER_ONLY + /* + * Hack to simplify CPU class hierarchies that include both 32- and + * 64-bit models: reduce SV39/48/57/64 to SV32 for 32-bit models. + */ + if (mcc->def->misa_mxl_max == MXL_RV32 && + !valid_vm_1_10_32[mcc->def->cfg.max_satp_mode]) { + mcc->def->cfg.max_satp_mode = VM_1_10_SV32; + } +#endif + } + if (def->priv_spec != RISCV_PROFILE_ATTR_UNUSED) { + assert(def->priv_spec <= PRIV_VERSION_LATEST); + mcc->def->priv_spec = def->priv_spec; + } + if (def->vext_spec != RISCV_PROFILE_ATTR_UNUSED) { + assert(def->vext_spec != 0); + mcc->def->vext_spec = def->vext_spec; + } + mcc->def->misa_ext |= def->misa_ext; + + riscv_cpu_cfg_merge(&mcc->def->cfg, &def->cfg); - mcc->misa_mxl_max = (RISCVMXL)GPOINTER_TO_UINT(data); - riscv_cpu_validate_misa_mxl(mcc); + if (def->custom_csrs) { + assert(!mcc->def->custom_csrs); + mcc->def->custom_csrs = def->custom_csrs; + } + } + + if (!object_class_is_abstract(c)) { + riscv_cpu_validate_misa_mxl(mcc); + } } static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, @@ -3166,41 +2886,34 @@ void riscv_isa_write_fdt(RISCVCPU *cpu, void *fdt, char *nodename) } #endif -#define DEFINE_DYNAMIC_CPU(type_name, misa_mxl_max, initfn) \ +#define DEFINE_ABSTRACT_RISCV_CPU(type_name, parent_type_name, ...) \ { \ .name = (type_name), \ - .parent = TYPE_RISCV_DYNAMIC_CPU, \ - .instance_init = (initfn), \ - .class_init = riscv_cpu_class_init, \ - .class_data = GUINT_TO_POINTER(misa_mxl_max) \ + .parent = (parent_type_name), \ + .abstract = true, \ + .class_data = &(const RISCVCPUDef) { \ + .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \ + .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \ + .cfg.max_satp_mode = -1, \ + __VA_ARGS__ \ + }, \ } -#define DEFINE_VENDOR_CPU(type_name, misa_mxl_max, initfn) \ +#define DEFINE_RISCV_CPU(type_name, parent_type_name, ...) 
\ { \ .name = (type_name), \ - .parent = TYPE_RISCV_VENDOR_CPU, \ - .instance_init = (initfn), \ - .class_init = riscv_cpu_class_init, \ - .class_data = GUINT_TO_POINTER(misa_mxl_max) \ + .parent = (parent_type_name), \ + .class_data = &(const RISCVCPUDef) { \ + .priv_spec = RISCV_PROFILE_ATTR_UNUSED, \ + .vext_spec = RISCV_PROFILE_ATTR_UNUSED, \ + .cfg.max_satp_mode = -1, \ + __VA_ARGS__ \ + }, \ } -#define DEFINE_BARE_CPU(type_name, misa_mxl_max, initfn) \ - { \ - .name = (type_name), \ - .parent = TYPE_RISCV_BARE_CPU, \ - .instance_init = (initfn), \ - .class_init = riscv_cpu_class_init, \ - .class_data = GUINT_TO_POINTER(misa_mxl_max) \ - } - -#define DEFINE_PROFILE_CPU(type_name, misa_mxl_max, initfn) \ - { \ - .name = (type_name), \ - .parent = TYPE_RISCV_BARE_CPU, \ - .instance_init = (initfn), \ - .class_init = riscv_cpu_class_init, \ - .class_data = GUINT_TO_POINTER(misa_mxl_max) \ - } +#define DEFINE_PROFILE_CPU(type_name, parent_type_name, profile_) \ + DEFINE_RISCV_CPU(type_name, parent_type_name, \ + .profile = &(profile_)) static const TypeInfo riscv_cpu_type_infos[] = { { @@ -3209,67 +2922,370 @@ static const TypeInfo riscv_cpu_type_infos[] = { .instance_size = sizeof(RISCVCPU), .instance_align = __alignof(RISCVCPU), .instance_init = riscv_cpu_init, - .instance_post_init = riscv_cpu_post_init, .abstract = true, .class_size = sizeof(RISCVCPUClass), .class_init = riscv_cpu_common_class_init, + .class_base_init = riscv_cpu_class_base_init, }, - { - .name = TYPE_RISCV_DYNAMIC_CPU, - .parent = TYPE_RISCV_CPU, - .abstract = true, - }, - { - .name = TYPE_RISCV_VENDOR_CPU, - .parent = TYPE_RISCV_CPU, - .abstract = true, - }, - { - .name = TYPE_RISCV_BARE_CPU, - .parent = TYPE_RISCV_CPU, - .instance_init = riscv_bare_cpu_init, - .abstract = true, - }, + + DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_DYNAMIC_CPU, TYPE_RISCV_CPU, + .cfg.mmu = true, + .cfg.pmp = true, + .priv_spec = PRIV_VERSION_LATEST, + ), + + DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_VENDOR_CPU, TYPE_RISCV_CPU), + DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_BARE_CPU, TYPE_RISCV_CPU, + /* + * Bare CPUs do not inherit the timer and performance + * counters from the parent class (see riscv_cpu_init() + * for info on why the parent enables them). + * + * Users have to explicitly enable these counters for + * bare CPUs. + */ + .bare = true, + + /* Set to QEMU's first supported priv version */ + .priv_spec = PRIV_VERSION_1_10_0, + + /* + * Support all available satp_mode settings. By default + * only MBARE will be available if the user doesn't enable + * a mode manually (see riscv_cpu_satp_mode_finalize()). 
+ */ +#ifdef TARGET_RISCV32 + .cfg.max_satp_mode = VM_1_10_SV32, +#else + .cfg.max_satp_mode = VM_1_10_SV57, +#endif + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX, TYPE_RISCV_DYNAMIC_CPU, #if defined(TARGET_RISCV32) - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV32, riscv_max_cpu_init), + .misa_mxl_max = MXL_RV32, + .cfg.max_satp_mode = VM_1_10_SV32, #elif defined(TARGET_RISCV64) - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, MXL_RV64, riscv_max_cpu_init), + .misa_mxl_max = MXL_RV64, + .cfg.max_satp_mode = VM_1_10_SV57, #endif + ), + + DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E, TYPE_RISCV_VENDOR_CPU, + .misa_ext = RVI | RVM | RVA | RVC | RVU, + .priv_spec = PRIV_VERSION_1_10_0, + .cfg.max_satp_mode = VM_1_10_MBARE, + .cfg.ext_zifencei = true, + .cfg.ext_zicsr = true, + .cfg.pmp = true, + .cfg.pmp_regions = 8 + ), + + DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU, + .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU, + .priv_spec = PRIV_VERSION_1_10_0, + + .cfg.max_satp_mode = VM_1_10_SV39, + .cfg.ext_zifencei = true, + .cfg.ext_zicsr = true, + .cfg.mmu = true, + .cfg.pmp = true, + .cfg.pmp_regions = 8 + ), #if defined(TARGET_RISCV32) || \ (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, MXL_RV32, rv32_base_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, MXL_RV32, rv32_ibex_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, MXL_RV32, rv32_sifive_e_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, MXL_RV32, rv32_imafcu_nommu_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, MXL_RV32, rv32_sifive_u_cpu_init), - DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32I, MXL_RV32, rv32i_bare_cpu_init), - DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV32E, MXL_RV32, rv32e_bare_cpu_init), + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE32, TYPE_RISCV_DYNAMIC_CPU, + .cfg.max_satp_mode = VM_1_10_SV32, + .misa_mxl_max = MXL_RV32, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_IBEX, TYPE_RISCV_VENDOR_CPU, + .misa_mxl_max = MXL_RV32, + .misa_ext = RVI | RVM | RVC | RVU, + .priv_spec = PRIV_VERSION_1_12_0, + .cfg.max_satp_mode = VM_1_10_MBARE, + .cfg.ext_zifencei = true, + .cfg.ext_zicsr = true, + .cfg.pmp = true, + .cfg.ext_smepmp = true, + + .cfg.ext_zba = true, + .cfg.ext_zbb = true, + .cfg.ext_zbc = true, + .cfg.ext_zbs = true + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E31, TYPE_RISCV_CPU_SIFIVE_E, + .misa_mxl_max = MXL_RV32 + ), + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E34, TYPE_RISCV_CPU_SIFIVE_E, + .misa_mxl_max = MXL_RV32, + .misa_ext = RVF, /* IMAFCU */ + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U34, TYPE_RISCV_CPU_SIFIVE_U, + .misa_mxl_max = MXL_RV32, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32I, TYPE_RISCV_BARE_CPU, + .misa_mxl_max = MXL_RV32, + .misa_ext = RVI + ), + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV32E, TYPE_RISCV_BARE_CPU, + .misa_mxl_max = MXL_RV32, + .misa_ext = RVE + ), #endif #if (defined(TARGET_RISCV64) && !defined(CONFIG_USER_ONLY)) - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX32, MXL_RV32, riscv_max_cpu_init), + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_MAX32, TYPE_RISCV_DYNAMIC_CPU, + .cfg.max_satp_mode = VM_1_10_SV32, + .misa_mxl_max = MXL_RV32, + ), #endif #if defined(TARGET_RISCV64) - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, MXL_RV64, rv64_base_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, MXL_RV64, rv64_sifive_e_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, MXL_RV64, rv64_sifive_u_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, MXL_RV64, rv64_sifive_u_cpu_init), - 
DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, MXL_RV64, rv64_thead_c906_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_TT_ASCALON, MXL_RV64, rv64_tt_ascalon_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, MXL_RV64, rv64_veyron_v1_cpu_init), - DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, - MXL_RV64, rv64_xiangshan_nanhu_cpu_init), -#ifdef CONFIG_TCG - DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, MXL_RV128, rv128_base_cpu_init), + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE64, TYPE_RISCV_DYNAMIC_CPU, + .cfg.max_satp_mode = VM_1_10_SV57, + .misa_mxl_max = MXL_RV64, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_E51, TYPE_RISCV_CPU_SIFIVE_E, + .misa_mxl_max = MXL_RV64 + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U54, TYPE_RISCV_CPU_SIFIVE_U, + .misa_mxl_max = MXL_RV64, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_SHAKTI_C, TYPE_RISCV_CPU_SIFIVE_U, + .misa_mxl_max = MXL_RV64, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_THEAD_C906, TYPE_RISCV_VENDOR_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVG | RVC | RVS | RVU, + .priv_spec = PRIV_VERSION_1_11_0, + + .cfg.ext_zfa = true, + .cfg.ext_zfh = true, + .cfg.mmu = true, + .cfg.ext_xtheadba = true, + .cfg.ext_xtheadbb = true, + .cfg.ext_xtheadbs = true, + .cfg.ext_xtheadcmo = true, + .cfg.ext_xtheadcondmov = true, + .cfg.ext_xtheadfmemidx = true, + .cfg.ext_xtheadmac = true, + .cfg.ext_xtheadmemidx = true, + .cfg.ext_xtheadmempair = true, + .cfg.ext_xtheadsync = true, + .cfg.pmp = true, + + .cfg.mvendorid = THEAD_VENDOR_ID, + + .cfg.max_satp_mode = VM_1_10_SV39, +#ifndef CONFIG_USER_ONLY + .custom_csrs = th_csr_list, +#endif + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_TT_ASCALON, TYPE_RISCV_VENDOR_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVG | RVC | RVS | RVU | RVH | RVV, + .priv_spec = PRIV_VERSION_1_13_0, + .vext_spec = VEXT_VERSION_1_00_0, + + /* ISA extensions */ + .cfg.mmu = true, + .cfg.vlenb = 256 >> 3, + .cfg.elen = 64, + .cfg.rvv_ma_all_1s = true, + .cfg.rvv_ta_all_1s = true, + .cfg.misa_w = true, + .cfg.pmp = true, + .cfg.cbom_blocksize = 64, + .cfg.cbop_blocksize = 64, + .cfg.cboz_blocksize = 64, + .cfg.ext_zic64b = true, + .cfg.ext_zicbom = true, + .cfg.ext_zicbop = true, + .cfg.ext_zicboz = true, + .cfg.ext_zicntr = true, + .cfg.ext_zicond = true, + .cfg.ext_zicsr = true, + .cfg.ext_zifencei = true, + .cfg.ext_zihintntl = true, + .cfg.ext_zihintpause = true, + .cfg.ext_zihpm = true, + .cfg.ext_zimop = true, + .cfg.ext_zawrs = true, + .cfg.ext_zfa = true, + .cfg.ext_zfbfmin = true, + .cfg.ext_zfh = true, + .cfg.ext_zfhmin = true, + .cfg.ext_zcb = true, + .cfg.ext_zcmop = true, + .cfg.ext_zba = true, + .cfg.ext_zbb = true, + .cfg.ext_zbs = true, + .cfg.ext_zkt = true, + .cfg.ext_zvbb = true, + .cfg.ext_zvbc = true, + .cfg.ext_zvfbfmin = true, + .cfg.ext_zvfbfwma = true, + .cfg.ext_zvfh = true, + .cfg.ext_zvfhmin = true, + .cfg.ext_zvkng = true, + .cfg.ext_smaia = true, + .cfg.ext_smstateen = true, + .cfg.ext_ssaia = true, + .cfg.ext_sscofpmf = true, + .cfg.ext_sstc = true, + .cfg.ext_svade = true, + .cfg.ext_svinval = true, + .cfg.ext_svnapot = true, + .cfg.ext_svpbmt = true, + + .cfg.max_satp_mode = VM_1_10_SV57, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_VEYRON_V1, TYPE_RISCV_VENDOR_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVG | RVC | RVS | RVU | RVH, + .priv_spec = PRIV_VERSION_1_12_0, + + /* ISA extensions */ + .cfg.mmu = true, + .cfg.ext_zifencei = true, + .cfg.ext_zicsr = true, + .cfg.pmp = true, + .cfg.ext_zicbom = true, + .cfg.cbom_blocksize = 64, + .cfg.cboz_blocksize = 64, + .cfg.ext_zicboz = true, + .cfg.ext_smaia = 
true, + .cfg.ext_ssaia = true, + .cfg.ext_sscofpmf = true, + .cfg.ext_sstc = true, + .cfg.ext_svinval = true, + .cfg.ext_svnapot = true, + .cfg.ext_svpbmt = true, + .cfg.ext_smstateen = true, + .cfg.ext_zba = true, + .cfg.ext_zbb = true, + .cfg.ext_zbc = true, + .cfg.ext_zbs = true, + .cfg.ext_XVentanaCondOps = true, + + .cfg.mvendorid = VEYRON_V1_MVENDORID, + .cfg.marchid = VEYRON_V1_MARCHID, + .cfg.mimpid = VEYRON_V1_MIMPID, + + .cfg.max_satp_mode = VM_1_10_SV48, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_XIANGSHAN_NANHU, TYPE_RISCV_VENDOR_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVG | RVC | RVB | RVS | RVU, + .priv_spec = PRIV_VERSION_1_12_0, + + /* ISA extensions */ + .cfg.ext_zbc = true, + .cfg.ext_zbkb = true, + .cfg.ext_zbkc = true, + .cfg.ext_zbkx = true, + .cfg.ext_zknd = true, + .cfg.ext_zkne = true, + .cfg.ext_zknh = true, + .cfg.ext_zksed = true, + .cfg.ext_zksh = true, + .cfg.ext_svinval = true, + + .cfg.mmu = true, + .cfg.pmp = true, + + .cfg.max_satp_mode = VM_1_10_SV39, + ), + + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_XIANGSHAN_KMH, TYPE_RISCV_VENDOR_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVG | RVC | RVB | RVS | RVU | RVH | RVV, + .priv_spec = PRIV_VERSION_1_13_0, + /* + * The RISC-V Instruction Set Manual: Volume I + * Unprivileged Architecture + */ + .cfg.ext_zicntr = true, + .cfg.ext_zihpm = true, + .cfg.ext_zihintntl = true, + .cfg.ext_zihintpause = true, + .cfg.ext_zimop = true, + .cfg.ext_zcmop = true, + .cfg.ext_zicond = true, + .cfg.ext_zawrs = true, + .cfg.ext_zacas = true, + .cfg.ext_zfh = true, + .cfg.ext_zfa = true, + .cfg.ext_zcb = true, + .cfg.ext_zbc = true, + .cfg.ext_zvfh = true, + .cfg.ext_zkn = true, + .cfg.ext_zks = true, + .cfg.ext_zkt = true, + .cfg.ext_zvbb = true, + .cfg.ext_zvkt = true, + /* + * The RISC-V Instruction Set Manual: Volume II + * Privileged Architecture + */ + .cfg.ext_smstateen = true, + .cfg.ext_smcsrind = true, + .cfg.ext_sscsrind = true, + .cfg.ext_svnapot = true, + .cfg.ext_svpbmt = true, + .cfg.ext_svinval = true, + .cfg.ext_sstc = true, + .cfg.ext_sscofpmf = true, + .cfg.ext_ssdbltrp = true, + .cfg.ext_ssnpm = true, + .cfg.ext_smnpm = true, + .cfg.ext_smmpm = true, + .cfg.ext_sspm = true, + .cfg.ext_supm = true, + /* The RISC-V Advanced Interrupt Architecture */ + .cfg.ext_smaia = true, + .cfg.ext_ssaia = true, + /* RVA23 Profiles */ + .cfg.ext_zicbom = true, + .cfg.ext_zicbop = true, + .cfg.ext_zicboz = true, + .cfg.ext_svade = true, + .cfg.mmu = true, + .cfg.pmp = true, + .cfg.max_satp_mode = VM_1_10_SV48, + ), + +#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_BASE128, TYPE_RISCV_DYNAMIC_CPU, + .cfg.max_satp_mode = VM_1_10_SV57, + .misa_mxl_max = MXL_RV128, + ), #endif /* CONFIG_TCG */ - DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, MXL_RV64, rv64i_bare_cpu_init), - DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init), - DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init), - DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init), - DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init), - DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init), + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64I, TYPE_RISCV_BARE_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVI + ), + DEFINE_RISCV_CPU(TYPE_RISCV_CPU_RV64E, TYPE_RISCV_BARE_CPU, + .misa_mxl_max = MXL_RV64, + .misa_ext = RVE + ), + + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, TYPE_RISCV_CPU_RV64I, RVA22U64), + 
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, TYPE_RISCV_CPU_RV64I, RVA22S64), + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, TYPE_RISCV_CPU_RV64I, RVA23U64), + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, TYPE_RISCV_CPU_RV64I, RVA23S64), #endif /* TARGET_RISCV64 */ }; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 51e49e0..4a862da 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -23,7 +23,9 @@ #include "hw/core/cpu.h" #include "hw/registerfields.h" #include "hw/qdev-properties.h" +#include "exec/cpu-common.h" #include "exec/cpu-defs.h" +#include "exec/cpu-interrupt.h" #include "exec/gdbstub.h" #include "qemu/cpu-float.h" #include "qom/object.h" @@ -44,12 +46,6 @@ typedef struct CPUArchState CPURISCVState; #endif /* - * RISC-V-specific extra insn start words: - * 1: Original instruction opcode - * 2: more information about instruction - */ -#define TARGET_INSN_START_EXTRA_WORDS 2 -/* * b0: Whether a instruction always raise a store AMO or not. */ #define RISCV_UW2_ALWAYS_STORE_AMO 1 @@ -79,13 +75,29 @@ const char *riscv_get_misa_ext_name(uint32_t bit); const char *riscv_get_misa_ext_description(uint32_t bit); #define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop) +#define ENV_CSR_OFFSET(_csr) offsetof(CPURISCVState, _csr) typedef struct riscv_cpu_profile { struct riscv_cpu_profile *u_parent; struct riscv_cpu_profile *s_parent; const char *name; uint32_t misa_ext; + /* + * The profile is enabled/disabled via command line or + * via cpu_init(). Enabling a profile will add all its + * mandatory extensions in the CPU during init(). + */ bool enabled; + /* + * The profile is present in the CPU, i.e. the current set of + * CPU extensions complies with it. A profile can be enabled + * and not present (e.g. the user disabled a mandatory extension) + * and the other way around (e.g. all mandatory extensions are + * present in a non-profile CPU). + * + * QMP uses this flag. + */ + bool present; bool user_set; int priv_spec; int satp_mode; @@ -162,7 +174,8 @@ extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[]; #define MMU_USER_IDX 3 -#define MAX_RISCV_PMPS (16) +#define MAX_RISCV_PMPS (64) +#define OLD_MAX_RISCV_PMPS (16) #if !defined(CONFIG_USER_ONLY) #include "pmp.h" @@ -503,6 +516,19 @@ struct CPUArchState { }; /* + * map is a 16-bit bitmap: the most significant set bit in map is the maximum + * satp mode that is supported. It may be chosen by the user and must respect + * what qemu implements (valid_1_10_32/64) and what the hw is capable of + * (supported bitmap below). + * + * init is a 16-bit bitmap used to make sure the user selected a correct + * configuration as per the specification. + */ +typedef struct { + uint16_t map, init; +} RISCVSATPModes; + +/* * RISCVCPU: * @env: #CPURISCVState * @@ -518,6 +544,7 @@ struct ArchCPU { /* Configuration Settings */ RISCVCPUConfig cfg; + RISCVSATPModes satp_modes; QEMUTimer *pmu_timer; /* A bitmask of Available programmable counters */ @@ -527,6 +554,19 @@ struct ArchCPU { const GPtrArray *decoders; }; +typedef struct RISCVCSR RISCVCSR; + +typedef struct RISCVCPUDef { + RISCVMXL misa_mxl_max; /* max mxl for this cpu */ + RISCVCPUProfile *profile; + uint32_t misa_ext; + int priv_spec; + int32_t vext_spec; + RISCVCPUConfig cfg; + bool bare; + const RISCVCSR *custom_csrs; +} RISCVCPUDef; + /** * RISCVCPUClass: * @parent_realize: The parent class' realize handler. 
@@ -539,7 +579,7 @@ struct RISCVCPUClass { DeviceRealize parent_realize; ResettablePhases parent_phases; - RISCVMXL misa_mxl_max; /* max mxl for this cpu */ + RISCVCPUDef *def; }; static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) @@ -632,8 +672,6 @@ G_NORETURN void riscv_raise_exception(CPURISCVState *env, target_ulong riscv_cpu_get_fflags(CPURISCVState *env); void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong); -#include "exec/cpu-all.h" - FIELD(TB_FLAGS, MEM_IDX, 0, 3) FIELD(TB_FLAGS, FS, 3, 2) /* Vector flags */ @@ -808,9 +846,6 @@ static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew, return vlen >> (vsew + 3 - lmul); } -void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *pflags); - bool riscv_cpu_is_32bit(RISCVCPU *cpu); bool riscv_cpu_virt_mem_enabled(CPURISCVState *env); @@ -822,8 +857,8 @@ RISCVException riscv_csrr(CPURISCVState *env, int csrno, target_ulong *ret_value); RISCVException riscv_csrrw(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask); + target_ulong *ret_value, target_ulong new_value, + target_ulong write_mask, uintptr_t ra); RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, @@ -832,13 +867,13 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, static inline void riscv_csr_write(CPURISCVState *env, int csrno, target_ulong val) { - riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS)); + riscv_csrrw(env, csrno, NULL, val, MAKE_64BIT_MASK(0, TARGET_LONG_BITS), 0); } static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno) { target_ulong val = 0; - riscv_csrrw(env, csrno, &val, 0, 0); + riscv_csrrw(env, csrno, &val, 0, 0, 0); return val; } @@ -847,7 +882,8 @@ typedef RISCVException (*riscv_csr_predicate_fn)(CPURISCVState *env, typedef RISCVException (*riscv_csr_read_fn)(CPURISCVState *env, int csrno, target_ulong *ret_value); typedef RISCVException (*riscv_csr_write_fn)(CPURISCVState *env, int csrno, - target_ulong new_value); + target_ulong new_value, + uintptr_t ra); typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, @@ -856,8 +892,8 @@ typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno, RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno, Int128 *ret_value); RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, - Int128 *ret_value, - Int128 new_value, Int128 write_mask); + Int128 *ret_value, Int128 new_value, + Int128 write_mask, uintptr_t ra); typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno, Int128 *ret_value); @@ -876,6 +912,12 @@ typedef struct { uint32_t min_priv_ver; } riscv_csr_operations; +struct RISCVCSR { + int csrno; + bool (*insertion_test)(RISCVCPU *cpu); + riscv_csr_operations csr_ops; +}; + /* CSR function table constants */ enum { CSR_TABLE_SIZE = 0x1000 @@ -910,7 +952,6 @@ extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[]; extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[]; extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[]; extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[]; -extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[]; typedef struct isa_ext_data { const char *name; @@ -930,18 +971,17 @@ extern riscv_csr_operations csr_ops[CSR_TABLE_SIZE]; extern const bool valid_vm_1_10_32[], valid_vm_1_10_64[]; void 
riscv_get_csr_ops(int csrno, riscv_csr_operations *ops); -void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops); +void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops); void riscv_cpu_register_gdb_regs_for_features(CPUState *cs); target_ulong riscv_new_csr_seed(target_ulong new_value, target_ulong write_mask); -uint8_t satp_mode_max_from_map(uint32_t map); const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit); -/* Implemented in th_csr.c */ -void th_register_custom_csrs(RISCVCPU *cpu); +/* In th_csr.c */ +extern const RISCVCSR th_csr_list[]; const char *priv_spec_to_str(int priv_version); #endif /* RISCV_CPU_H */ diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index a30317c..b62dd82 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -372,6 +372,18 @@ #define CSR_PMPCFG1 0x3a1 #define CSR_PMPCFG2 0x3a2 #define CSR_PMPCFG3 0x3a3 +#define CSR_PMPCFG4 0x3a4 +#define CSR_PMPCFG5 0x3a5 +#define CSR_PMPCFG6 0x3a6 +#define CSR_PMPCFG7 0x3a7 +#define CSR_PMPCFG8 0x3a8 +#define CSR_PMPCFG9 0x3a9 +#define CSR_PMPCFG10 0x3aa +#define CSR_PMPCFG11 0x3ab +#define CSR_PMPCFG12 0x3ac +#define CSR_PMPCFG13 0x3ad +#define CSR_PMPCFG14 0x3ae +#define CSR_PMPCFG15 0x3af #define CSR_PMPADDR0 0x3b0 #define CSR_PMPADDR1 0x3b1 #define CSR_PMPADDR2 0x3b2 @@ -388,6 +400,54 @@ #define CSR_PMPADDR13 0x3bd #define CSR_PMPADDR14 0x3be #define CSR_PMPADDR15 0x3bf +#define CSR_PMPADDR16 0x3c0 +#define CSR_PMPADDR17 0x3c1 +#define CSR_PMPADDR18 0x3c2 +#define CSR_PMPADDR19 0x3c3 +#define CSR_PMPADDR20 0x3c4 +#define CSR_PMPADDR21 0x3c5 +#define CSR_PMPADDR22 0x3c6 +#define CSR_PMPADDR23 0x3c7 +#define CSR_PMPADDR24 0x3c8 +#define CSR_PMPADDR25 0x3c9 +#define CSR_PMPADDR26 0x3ca +#define CSR_PMPADDR27 0x3cb +#define CSR_PMPADDR28 0x3cc +#define CSR_PMPADDR29 0x3cd +#define CSR_PMPADDR30 0x3ce +#define CSR_PMPADDR31 0x3cf +#define CSR_PMPADDR32 0x3d0 +#define CSR_PMPADDR33 0x3d1 +#define CSR_PMPADDR34 0x3d2 +#define CSR_PMPADDR35 0x3d3 +#define CSR_PMPADDR36 0x3d4 +#define CSR_PMPADDR37 0x3d5 +#define CSR_PMPADDR38 0x3d6 +#define CSR_PMPADDR39 0x3d7 +#define CSR_PMPADDR40 0x3d8 +#define CSR_PMPADDR41 0x3d9 +#define CSR_PMPADDR42 0x3da +#define CSR_PMPADDR43 0x3db +#define CSR_PMPADDR44 0x3dc +#define CSR_PMPADDR45 0x3dd +#define CSR_PMPADDR46 0x3de +#define CSR_PMPADDR47 0x3df +#define CSR_PMPADDR48 0x3e0 +#define CSR_PMPADDR49 0x3e1 +#define CSR_PMPADDR50 0x3e2 +#define CSR_PMPADDR51 0x3e3 +#define CSR_PMPADDR52 0x3e4 +#define CSR_PMPADDR53 0x3e5 +#define CSR_PMPADDR54 0x3e6 +#define CSR_PMPADDR55 0x3e7 +#define CSR_PMPADDR56 0x3e8 +#define CSR_PMPADDR57 0x3e9 +#define CSR_PMPADDR58 0x3ea +#define CSR_PMPADDR59 0x3eb +#define CSR_PMPADDR60 0x3ec +#define CSR_PMPADDR61 0x3ed +#define CSR_PMPADDR62 0x3ee +#define CSR_PMPADDR63 0x3ef /* RNMI */ #define CSR_MNSCRATCH 0x740 @@ -675,7 +735,8 @@ typedef enum { #define PTE_SOFT 0x300 /* Reserved for Software */ #define PTE_PBMT 0x6000000000000000ULL /* Page-based memory types */ #define PTE_N 0x8000000000000000ULL /* NAPOT translation */ -#define PTE_RESERVED 0x1FC0000000000000ULL /* Reserved bits */ +#define PTE_RESERVED(svrsw60t59b) \ + (svrsw60t59b ? 
0x07C0000000000000ULL : 0x1FC0000000000000ULL) /* Reserved bits */ #define PTE_ATTR (PTE_N | PTE_PBMT) /* All attributes bits */ /* Page table PPN shift amount */ diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h index 8a84348..aa28dc8 100644 --- a/target/riscv/cpu_cfg.h +++ b/target/riscv/cpu_cfg.h @@ -21,184 +21,10 @@ #ifndef RISCV_CPU_CFG_H #define RISCV_CPU_CFG_H -/* - * map is a 16-bit bitmap: the most significant set bit in map is the maximum - * satp mode that is supported. It may be chosen by the user and must respect - * what qemu implements (valid_1_10_32/64) and what the hw is capable of - * (supported bitmap below). - * - * init is a 16-bit bitmap used to make sure the user selected a correct - * configuration as per the specification. - * - * supported is a 16-bit bitmap used to reflect the hw capabilities. - */ -typedef struct { - uint16_t map, init, supported; -} RISCVSATPMap; - struct RISCVCPUConfig { - bool ext_zba; - bool ext_zbb; - bool ext_zbc; - bool ext_zbkb; - bool ext_zbkc; - bool ext_zbkx; - bool ext_zbs; - bool ext_zca; - bool ext_zcb; - bool ext_zcd; - bool ext_zce; - bool ext_zcf; - bool ext_zcmp; - bool ext_zcmt; - bool ext_zk; - bool ext_zkn; - bool ext_zknd; - bool ext_zkne; - bool ext_zknh; - bool ext_zkr; - bool ext_zks; - bool ext_zksed; - bool ext_zksh; - bool ext_zkt; - bool ext_zifencei; - bool ext_zicntr; - bool ext_zicsr; - bool ext_zicbom; - bool ext_zicbop; - bool ext_zicboz; - bool ext_zicfilp; - bool ext_zicfiss; - bool ext_zicond; - bool ext_zihintntl; - bool ext_zihintpause; - bool ext_zihpm; - bool ext_zimop; - bool ext_zcmop; - bool ext_ztso; - bool ext_smstateen; - bool ext_sstc; - bool ext_smcdeleg; - bool ext_ssccfg; - bool ext_smcntrpmf; - bool ext_smcsrind; - bool ext_sscsrind; - bool ext_ssdbltrp; - bool ext_smdbltrp; - bool ext_svadu; - bool ext_svinval; - bool ext_svnapot; - bool ext_svpbmt; - bool ext_svvptc; - bool ext_svukte; - bool ext_zdinx; - bool ext_zaamo; - bool ext_zacas; - bool ext_zama16b; - bool ext_zabha; - bool ext_zalrsc; - bool ext_zawrs; - bool ext_zfa; - bool ext_zfbfmin; - bool ext_zfh; - bool ext_zfhmin; - bool ext_zfinx; - bool ext_zhinx; - bool ext_zhinxmin; - bool ext_zve32f; - bool ext_zve32x; - bool ext_zve64f; - bool ext_zve64d; - bool ext_zve64x; - bool ext_zvbb; - bool ext_zvbc; - bool ext_zvkb; - bool ext_zvkg; - bool ext_zvkned; - bool ext_zvknha; - bool ext_zvknhb; - bool ext_zvksed; - bool ext_zvksh; - bool ext_zvkt; - bool ext_zvkn; - bool ext_zvknc; - bool ext_zvkng; - bool ext_zvks; - bool ext_zvksc; - bool ext_zvksg; - bool ext_zmmul; - bool ext_zvfbfmin; - bool ext_zvfbfwma; - bool ext_zvfh; - bool ext_zvfhmin; - bool ext_smaia; - bool ext_ssaia; - bool ext_smctr; - bool ext_ssctr; - bool ext_sscofpmf; - bool ext_smepmp; - bool ext_smrnmi; - bool ext_ssnpm; - bool ext_smnpm; - bool ext_smmpm; - bool ext_sspm; - bool ext_supm; - bool rvv_ta_all_1s; - bool rvv_ma_all_1s; - bool rvv_vl_half_avl; - - uint32_t mvendorid; - uint64_t marchid; - uint64_t mimpid; - - /* Named features */ - bool ext_svade; - bool ext_zic64b; - bool ext_ssstateen; - bool ext_sha; - - /* - * Always 'true' booleans for named features - * TCG always implement/can't be user disabled, - * based on spec version. 
- */ - bool has_priv_1_13; - bool has_priv_1_12; - bool has_priv_1_11; - - /* Always enabled for TCG if has_priv_1_11 */ - bool ext_ziccrse; - - /* Vendor-specific custom extensions */ - bool ext_xtheadba; - bool ext_xtheadbb; - bool ext_xtheadbs; - bool ext_xtheadcmo; - bool ext_xtheadcondmov; - bool ext_xtheadfmemidx; - bool ext_xtheadfmv; - bool ext_xtheadmac; - bool ext_xtheadmemidx; - bool ext_xtheadmempair; - bool ext_xtheadsync; - bool ext_XVentanaCondOps; - - uint32_t pmu_mask; - uint16_t vlenb; - uint16_t elen; - uint16_t cbom_blocksize; - uint16_t cbop_blocksize; - uint16_t cboz_blocksize; - bool mmu; - bool pmp; - bool debug; - bool misa_w; - - bool short_isa_string; - -#ifndef CONFIG_USER_ONLY - RISCVSATPMap satp_mode; -#endif +#define BOOL_FIELD(x) bool x; +#define TYPED_FIELD(type, x, default) type x; +#include "cpu_cfg_fields.h.inc" }; typedef struct RISCVCPUConfig RISCVCPUConfig; diff --git a/target/riscv/cpu_cfg_fields.h.inc b/target/riscv/cpu_cfg_fields.h.inc new file mode 100644 index 0000000..e2d116f --- /dev/null +++ b/target/riscv/cpu_cfg_fields.h.inc @@ -0,0 +1,173 @@ +/* + * Required definitions before including this file: + * + * #define BOOL_FIELD(x) + * #define TYPED_FIELD(type, x, default) + */ + +BOOL_FIELD(ext_zba) +BOOL_FIELD(ext_zbb) +BOOL_FIELD(ext_zbc) +BOOL_FIELD(ext_zbkb) +BOOL_FIELD(ext_zbkc) +BOOL_FIELD(ext_zbkx) +BOOL_FIELD(ext_zbs) +BOOL_FIELD(ext_zca) +BOOL_FIELD(ext_zcb) +BOOL_FIELD(ext_zcd) +BOOL_FIELD(ext_zce) +BOOL_FIELD(ext_zcf) +BOOL_FIELD(ext_zcmp) +BOOL_FIELD(ext_zcmt) +BOOL_FIELD(ext_zk) +BOOL_FIELD(ext_zkn) +BOOL_FIELD(ext_zknd) +BOOL_FIELD(ext_zkne) +BOOL_FIELD(ext_zknh) +BOOL_FIELD(ext_zkr) +BOOL_FIELD(ext_zks) +BOOL_FIELD(ext_zksed) +BOOL_FIELD(ext_zksh) +BOOL_FIELD(ext_zkt) +BOOL_FIELD(ext_zifencei) +BOOL_FIELD(ext_zicntr) +BOOL_FIELD(ext_zicsr) +BOOL_FIELD(ext_zicbom) +BOOL_FIELD(ext_zicbop) +BOOL_FIELD(ext_zicboz) +BOOL_FIELD(ext_zicfilp) +BOOL_FIELD(ext_zicfiss) +BOOL_FIELD(ext_zicond) +BOOL_FIELD(ext_zihintntl) +BOOL_FIELD(ext_zihintpause) +BOOL_FIELD(ext_zihpm) +BOOL_FIELD(ext_zimop) +BOOL_FIELD(ext_zcmop) +BOOL_FIELD(ext_ztso) +BOOL_FIELD(ext_smstateen) +BOOL_FIELD(ext_sstc) +BOOL_FIELD(ext_smcdeleg) +BOOL_FIELD(ext_ssccfg) +BOOL_FIELD(ext_smcntrpmf) +BOOL_FIELD(ext_smcsrind) +BOOL_FIELD(ext_sscsrind) +BOOL_FIELD(ext_ssdbltrp) +BOOL_FIELD(ext_smdbltrp) +BOOL_FIELD(ext_svadu) +BOOL_FIELD(ext_svinval) +BOOL_FIELD(ext_svnapot) +BOOL_FIELD(ext_svpbmt) +BOOL_FIELD(ext_svrsw60t59b) +BOOL_FIELD(ext_svvptc) +BOOL_FIELD(ext_svukte) +BOOL_FIELD(ext_zdinx) +BOOL_FIELD(ext_zaamo) +BOOL_FIELD(ext_zacas) +BOOL_FIELD(ext_zama16b) +BOOL_FIELD(ext_zabha) +BOOL_FIELD(ext_zalrsc) +BOOL_FIELD(ext_zawrs) +BOOL_FIELD(ext_zfa) +BOOL_FIELD(ext_zfbfmin) +BOOL_FIELD(ext_zfh) +BOOL_FIELD(ext_zfhmin) +BOOL_FIELD(ext_zfinx) +BOOL_FIELD(ext_zhinx) +BOOL_FIELD(ext_zhinxmin) +BOOL_FIELD(ext_zve32f) +BOOL_FIELD(ext_zve32x) +BOOL_FIELD(ext_zve64f) +BOOL_FIELD(ext_zve64d) +BOOL_FIELD(ext_zve64x) +BOOL_FIELD(ext_zvbb) +BOOL_FIELD(ext_zvbc) +BOOL_FIELD(ext_zvkb) +BOOL_FIELD(ext_zvkg) +BOOL_FIELD(ext_zvkned) +BOOL_FIELD(ext_zvknha) +BOOL_FIELD(ext_zvknhb) +BOOL_FIELD(ext_zvksed) +BOOL_FIELD(ext_zvksh) +BOOL_FIELD(ext_zvkt) +BOOL_FIELD(ext_zvkn) +BOOL_FIELD(ext_zvknc) +BOOL_FIELD(ext_zvkng) +BOOL_FIELD(ext_zvks) +BOOL_FIELD(ext_zvksc) +BOOL_FIELD(ext_zvksg) +BOOL_FIELD(ext_zmmul) +BOOL_FIELD(ext_zvfbfmin) +BOOL_FIELD(ext_zvfbfwma) +BOOL_FIELD(ext_zvfh) +BOOL_FIELD(ext_zvfhmin) +BOOL_FIELD(ext_smaia) +BOOL_FIELD(ext_ssaia) +BOOL_FIELD(ext_smctr) +BOOL_FIELD(ext_ssctr) 
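For readers tracing the new cpu_cfg_fields.h.inc consumers: this is the classic X-macro idiom. Each include site defines BOOL_FIELD()/TYPED_FIELD() to mean something different, pulls in the one shared field list, and the #undefs at the end of the list reset the macros for the next consumer. A self-contained sketch of the idiom, using hypothetical field names rather than the real list:

    #include <stdbool.h>
    #include <stdint.h>

    /* fields.h.inc -- the single source of truth for the field list */
    BOOL_FIELD(ext_foo)
    TYPED_FIELD(uint16_t, blocksize, 0)
    #undef BOOL_FIELD
    #undef TYPED_FIELD

    /* Consumer 1: expand the list into struct members. */
    struct Config {
    #define BOOL_FIELD(x) bool x;
    #define TYPED_FIELD(type, x, default_) type x;
    #include "fields.h.inc"
    };

    /* Consumer 2: expand the same list into reset-to-default code. */
    static void config_reset(struct Config *c)
    {
    #define BOOL_FIELD(x) c->x = false;
    #define TYPED_FIELD(type, x, default_) c->x = (default_);
    #include "fields.h.inc"
    }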
+BOOL_FIELD(ext_sscofpmf) +BOOL_FIELD(ext_smepmp) +BOOL_FIELD(ext_smrnmi) +BOOL_FIELD(ext_ssnpm) +BOOL_FIELD(ext_smnpm) +BOOL_FIELD(ext_smmpm) +BOOL_FIELD(ext_sspm) +BOOL_FIELD(ext_supm) +BOOL_FIELD(rvv_ta_all_1s) +BOOL_FIELD(rvv_ma_all_1s) +BOOL_FIELD(rvv_vl_half_avl) +BOOL_FIELD(rvv_vsetvl_x0_vill) +/* Named features */ +BOOL_FIELD(ext_svade) +BOOL_FIELD(ext_zic64b) +BOOL_FIELD(ext_ssstateen) +BOOL_FIELD(ext_sha) + +/* + * Always 'true' booleans for named features + * TCG always implement/can't be user disabled, + * based on spec version. + */ +BOOL_FIELD(has_priv_1_13) +BOOL_FIELD(has_priv_1_12) +BOOL_FIELD(has_priv_1_11) + +/* Always enabled for TCG if has_priv_1_11 */ +BOOL_FIELD(ext_ziccrse) + +/* Vendor-specific custom extensions */ +BOOL_FIELD(ext_xtheadba) +BOOL_FIELD(ext_xtheadbb) +BOOL_FIELD(ext_xtheadbs) +BOOL_FIELD(ext_xtheadcmo) +BOOL_FIELD(ext_xtheadcondmov) +BOOL_FIELD(ext_xtheadfmemidx) +BOOL_FIELD(ext_xtheadfmv) +BOOL_FIELD(ext_xtheadmac) +BOOL_FIELD(ext_xtheadmemidx) +BOOL_FIELD(ext_xtheadmempair) +BOOL_FIELD(ext_xtheadsync) +BOOL_FIELD(ext_XVentanaCondOps) + +BOOL_FIELD(mmu) +BOOL_FIELD(pmp) +BOOL_FIELD(debug) +BOOL_FIELD(misa_w) + +BOOL_FIELD(short_isa_string) + +TYPED_FIELD(uint32_t, mvendorid, 0) +TYPED_FIELD(uint64_t, marchid, 0) +TYPED_FIELD(uint64_t, mimpid, 0) + +TYPED_FIELD(uint32_t, pmu_mask, 0) +TYPED_FIELD(uint16_t, vlenb, 0) +TYPED_FIELD(uint16_t, elen, 0) +TYPED_FIELD(uint16_t, cbom_blocksize, 0) +TYPED_FIELD(uint16_t, cbop_blocksize, 0) +TYPED_FIELD(uint16_t, cboz_blocksize, 0) +TYPED_FIELD(uint8_t, pmp_regions, 0) + +TYPED_FIELD(int8_t, max_satp_mode, -1) + +#undef BOOL_FIELD +#undef TYPED_FIELD diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index 6c4391d..3479a62 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -24,14 +24,15 @@ #include "internals.h" #include "pmu.h" #include "exec/cputlb.h" -#include "exec/exec-all.h" #include "exec/page-protection.h" +#include "exec/target_page.h" +#include "system/memory.h" #include "instmap.h" #include "tcg/tcg-op.h" #include "accel/tcg/cpu-ops.h" #include "trace.h" #include "semihosting/common-semi.h" -#include "system/cpu-timers.h" +#include "exec/icount.h" #include "cpu_bits.h" #include "debug.h" #include "pmp.h" @@ -134,103 +135,6 @@ bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt) #endif } -void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, - uint64_t *cs_base, uint32_t *pflags) -{ - RISCVCPU *cpu = env_archcpu(env); - RISCVExtStatus fs, vs; - uint32_t flags = 0; - bool pm_signext = riscv_cpu_virt_mem_enabled(env); - - *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc; - *cs_base = 0; - - if (cpu->cfg.ext_zve32x) { - /* - * If env->vl equals to VLMAX, we can use generic vector operation - * expanders (GVEC) to accerlate the vector operations. - * However, as LMUL could be a fractional number. The maximum - * vector size can be operated might be less than 8 bytes, - * which is not supported by GVEC. So we set vl_eq_vlmax flag to true - * only when maxsz >= 8 bytes. 
- */ - - /* lmul encoded as in DisasContext::lmul */ - int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3); - uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW); - uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul); - uint32_t maxsz = vlmax << vsew; - bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) && - (maxsz >= 8); - flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill); - flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew); - flags = FIELD_DP32(flags, TB_FLAGS, LMUL, - FIELD_EX64(env->vtype, VTYPE, VLMUL)); - flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); - flags = FIELD_DP32(flags, TB_FLAGS, VTA, - FIELD_EX64(env->vtype, VTYPE, VTA)); - flags = FIELD_DP32(flags, TB_FLAGS, VMA, - FIELD_EX64(env->vtype, VTYPE, VMA)); - flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0); - } else { - flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); - } - - if (cpu_get_fcfien(env)) { - /* - * For Forward CFI, only the expectation of a lpad at - * the start of the block is tracked via env->elp. env->elp - * is turned on during jalr translation. - */ - flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp); - flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1); - } - - if (cpu_get_bcfien(env)) { - flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1); - } - -#ifdef CONFIG_USER_ONLY - fs = EXT_STATUS_DIRTY; - vs = EXT_STATUS_DIRTY; -#else - flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv); - - flags |= riscv_env_mmu_index(env, 0); - fs = get_field(env->mstatus, MSTATUS_FS); - vs = get_field(env->mstatus, MSTATUS_VS); - - if (env->virt_enabled) { - flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1); - /* - * Merge DISABLED and !DIRTY states using MIN. - * We will set both fields when dirtying. - */ - fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS)); - vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS)); - } - - /* With Zfinx, floating point is enabled/disabled by Smstateen. */ - if (!riscv_has_ext(env, RVF)) { - fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE) - ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED; - } - - if (cpu->cfg.debug && !icount_enabled()) { - flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled); - } -#endif - - flags = FIELD_DP32(flags, TB_FLAGS, FS, fs); - flags = FIELD_DP32(flags, TB_FLAGS, VS, vs); - flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl); - flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env)); - flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env)); - flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext); - - *pflags = flags; -} - RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env) { #ifndef CONFIG_USER_ONLY @@ -1405,6 +1309,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, bool svade = riscv_cpu_cfg(env)->ext_svade; bool svadu = riscv_cpu_cfg(env)->ext_svadu; bool adue = svadu ? 
env->menvcfg & MENVCFG_ADUE : !svade; + bool svrsw60t59b = riscv_cpu_cfg(env)->ext_svrsw60t59b; if (first_stage && two_stage && env->virt_enabled) { pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE); @@ -1472,7 +1377,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, if (riscv_cpu_sxl(env) == MXL_RV32) { ppn = pte >> PTE_PPN_SHIFT; } else { - if (pte & PTE_RESERVED) { + if (pte & PTE_RESERVED(svrsw60t59b)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: " "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n", __func__, pte_addr, pte); @@ -1662,9 +1567,11 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1); target_ulong old_pte; if (riscv_cpu_sxl(env) == MXL_RV32) { - old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, pte, updated_pte); + old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, cpu_to_le32(pte), cpu_to_le32(updated_pte)); + old_pte = le32_to_cpu(old_pte); } else { - old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte); + old_pte = qatomic_cmpxchg(pte_pa, cpu_to_le64(pte), cpu_to_le64(updated_pte)); + old_pte = le64_to_cpu(old_pte); } if (old_pte != pte) { goto restart; diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c index bb084e0..a0fb54b 100644 --- a/target/riscv/crypto_helper.c +++ b/target/riscv/crypto_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "crypto/aes.h" #include "crypto/aes-round.h" diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 7948188..8631be9 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -24,12 +24,14 @@ #include "tcg/tcg-cpu.h" #include "pmu.h" #include "time_helper.h" -#include "exec/exec-all.h" #include "exec/cputlb.h" #include "exec/tb-flush.h" -#include "system/cpu-timers.h" +#include "exec/icount.h" +#include "accel/tcg/getpc.h" #include "qemu/guest-random.h" #include "qapi/error.h" +#include "tcg/insn-start-words.h" +#include "internals.h" #include <stdbool.h> /* CSR function table public API */ @@ -38,7 +40,7 @@ void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops) *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)]; } -void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops) +void riscv_set_csr_ops(int csrno, const riscv_csr_operations *ops) { csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops; } @@ -736,7 +738,10 @@ static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno) static RISCVException pmp(CPURISCVState *env, int csrno) { if (riscv_cpu_cfg(env)->pmp) { - if (csrno <= CSR_PMPCFG3) { + int max_pmpcfg = (env->priv_ver >= PRIV_VERSION_1_12_0) ? 
+ CSR_PMPCFG15 : CSR_PMPCFG3; + + if (csrno <= max_pmpcfg) { uint32_t reg_index = csrno - CSR_PMPCFG0; /* TODO: RV128 restriction check */ @@ -830,13 +835,15 @@ static RISCVException seed(CPURISCVState *env, int csrno) } /* zicfiss CSR_SSP read and write */ -static int read_ssp(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_ssp(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->ssp; return RISCV_EXCP_NONE; } -static int write_ssp(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_ssp(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) { env->ssp = val; return RISCV_EXCP_NONE; @@ -851,7 +858,7 @@ static RISCVException read_fflags(CPURISCVState *env, int csrno, } static RISCVException write_fflags(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) if (riscv_has_ext(env, RVF)) { @@ -870,7 +877,7 @@ static RISCVException read_frm(CPURISCVState *env, int csrno, } static RISCVException write_frm(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) if (riscv_has_ext(env, RVF)) { @@ -890,7 +897,7 @@ static RISCVException read_fcsr(CPURISCVState *env, int csrno, } static RISCVException write_fcsr(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) if (riscv_has_ext(env, RVF)) { @@ -942,7 +949,7 @@ static RISCVException read_vxrm(CPURISCVState *env, int csrno, } static RISCVException write_vxrm(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) env->mstatus |= MSTATUS_VS; @@ -959,7 +966,7 @@ static RISCVException read_vxsat(CPURISCVState *env, int csrno, } static RISCVException write_vxsat(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) env->mstatus |= MSTATUS_VS; @@ -976,7 +983,7 @@ static RISCVException read_vstart(CPURISCVState *env, int csrno, } static RISCVException write_vstart(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) env->mstatus |= MSTATUS_VS; @@ -997,7 +1004,7 @@ static RISCVException read_vcsr(CPURISCVState *env, int csrno, } static RISCVException write_vcsr(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { #if !defined(CONFIG_USER_ONLY) env->mstatus |= MSTATUS_VS; @@ -1055,7 +1062,7 @@ static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno, } static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t inh_avail_mask; @@ -1084,7 +1091,7 @@ static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno, } static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK | MCYCLECFGH_BIT_MINH); @@ -1109,7 +1116,7 @@ static RISCVException read_minstretcfg(CPURISCVState *env, int csrno, } static RISCVException write_minstretcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t inh_avail_mask; @@ -1136,7 +1143,7 @@ static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno, } static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t
ra) { target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK | MINSTRETCFGH_BIT_MINH); @@ -1163,7 +1170,7 @@ static RISCVException read_mhpmevent(CPURISCVState *env, int csrno, } static RISCVException write_mhpmevent(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { int evt_index = csrno - CSR_MCOUNTINHIBIT; uint64_t mhpmevt_val = val; @@ -1201,7 +1208,7 @@ static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno, } static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { int evt_index = csrno - CSR_MHPMEVENT3H + 3; uint64_t mhpmevth_val; @@ -1343,14 +1350,16 @@ static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val, return RISCV_EXCP_NONE; } -static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) { int ctr_idx = csrno - CSR_MCYCLE; return riscv_pmu_write_ctr(env, val, ctr_idx); } -static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) { int ctr_idx = csrno - CSR_MCYCLEH; @@ -1661,7 +1670,7 @@ static RISCVException read_vstimecmph(CPURISCVState *env, int csrno, } static RISCVException write_vstimecmp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (riscv_cpu_mxl(env) == MXL_RV32) { env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val); @@ -1676,7 +1685,7 @@ static RISCVException write_vstimecmp(CPURISCVState *env, int csrno, } static RISCVException write_vstimecmph(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val); riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, @@ -1710,13 +1719,13 @@ static RISCVException read_stimecmph(CPURISCVState *env, int csrno, } static RISCVException write_stimecmp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (env->virt_enabled) { if (env->hvictl & HVICTL_VTI) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } - return write_vstimecmp(env, csrno, val); + return write_vstimecmp(env, csrno, val, ra); } if (riscv_cpu_mxl(env) == MXL_RV32) { @@ -1731,13 +1740,13 @@ static RISCVException write_stimecmp(CPURISCVState *env, int csrno, } static RISCVException write_stimecmph(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (env->virt_enabled) { if (env->hvictl & HVICTL_VTI) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } - return write_vstimecmph(env, csrno, val); + return write_vstimecmph(env, csrno, val, ra); } env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val); @@ -1842,7 +1851,7 @@ static RISCVException read_zero(CPURISCVState *env, int csrno, } static RISCVException write_ignore(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { return RISCV_EXCP_NONE; } @@ -1906,8 +1915,13 @@ static RISCVException read_mstatus(CPURISCVState *env, int csrno, static bool validate_vm(CPURISCVState *env, target_ulong vm) { - uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map; - return get_field(mode_supported, (1 << vm)); + bool rv32 = riscv_cpu_mxl(env) == MXL_RV32; + RISCVCPU *cpu = env_archcpu(env); + int satp_mode_supported_max = cpu->cfg.max_satp_mode; + const bool 
*valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64; + + assert(satp_mode_supported_max >= 0); + return vm <= satp_mode_supported_max && valid_vm[vm]; } static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp, @@ -1963,7 +1977,7 @@ static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp, } static RISCVException write_mstatus(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t mstatus = env->mstatus; uint64_t mask = 0; @@ -2042,7 +2056,7 @@ static RISCVException read_mstatush(CPURISCVState *env, int csrno, } static RISCVException write_mstatush(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t valh = (uint64_t)val << 32; uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0; @@ -2095,8 +2109,21 @@ static RISCVException read_misa(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static target_ulong get_next_pc(CPURISCVState *env, uintptr_t ra) +{ + uint64_t data[INSN_START_WORDS]; + + /* Outside of a running cpu, env contains the next pc. */ + if (ra == 0 || !cpu_unwind_state_data(env_cpu(env), ra, data)) { + return env->pc; + } + + /* Within unwind data, [0] is pc and [1] is the opcode. */ + return data[0] + insn_len(data[1]); +} + static RISCVException write_misa(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { RISCVCPU *cpu = env_archcpu(env); uint32_t orig_misa_ext = env->misa_ext; @@ -2110,11 +2137,8 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, /* Mask extensions that are not supported by this hart */ val &= env->misa_ext_mask; - /* - * Suppress 'C' if next instruction is not aligned - * TODO: this should check next_pc - */ - if ((val & RVC) && (GETPC() & ~3) != 0) { + /* Suppress 'C' if next instruction is not aligned. 
*/ + if ((val & RVC) && (get_next_pc(env, ra) & 3) != 0) { val &= ~RVC; } @@ -2160,7 +2184,7 @@ static RISCVException read_medeleg(CPURISCVState *env, int csrno, } static RISCVException write_medeleg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS); return RISCV_EXCP_NONE; @@ -2955,7 +2979,7 @@ static RISCVException read_mtvec(CPURISCVState *env, int csrno, } static RISCVException write_mtvec(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ if ((val & 3) < 2) { @@ -2974,7 +2998,7 @@ static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno, } static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { int cidx; PMUCTRState *counter; @@ -3049,10 +3073,9 @@ static RISCVException read_scountinhibit(CPURISCVState *env, int csrno, } static RISCVException write_scountinhibit(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { - write_mcountinhibit(env, csrno, val & env->mcounteren); - return RISCV_EXCP_NONE; + return write_mcountinhibit(env, csrno, val & env->mcounteren, ra); } static RISCVException read_mcounteren(CPURISCVState *env, int csrno, @@ -3063,7 +3086,7 @@ static RISCVException read_mcounteren(CPURISCVState *env, int csrno, } static RISCVException write_mcounteren(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { RISCVCPU *cpu = env_archcpu(env); @@ -3097,7 +3120,7 @@ static RISCVException read_mscratch(CPURISCVState *env, int csrno, } static RISCVException write_mscratch(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->mscratch = val; return RISCV_EXCP_NONE; @@ -3106,14 +3129,14 @@ static RISCVException write_mscratch(CPURISCVState *env, int csrno, static RISCVException read_mepc(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env->mepc; + *val = env->mepc & get_xepc_mask(env); return RISCV_EXCP_NONE; } static RISCVException write_mepc(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { - env->mepc = val; + env->mepc = val & get_xepc_mask(env); return RISCV_EXCP_NONE; } @@ -3125,7 +3148,7 @@ static RISCVException read_mcause(CPURISCVState *env, int csrno, } static RISCVException write_mcause(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->mcause = val; return RISCV_EXCP_NONE; @@ -3139,7 +3162,7 @@ static RISCVException read_mtval(CPURISCVState *env, int csrno, } static RISCVException write_mtval(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->mtval = val; return RISCV_EXCP_NONE; @@ -3154,13 +3177,14 @@ static RISCVException read_menvcfg(CPURISCVState *env, int csrno, } static RISCVException write_henvcfg(CPURISCVState *env, int csrno, - target_ulong val); + target_ulong val, uintptr_t ra); static RISCVException write_menvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE | MENVCFG_CDE; + bool stce_changed = false; if (riscv_cpu_mxl(env) == MXL_RV64) { mask |= (cfg->ext_svpbmt ? 
MENVCFG_PBMTE : 0) | @@ -3186,11 +3210,19 @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno, if ((val & MENVCFG_DTE) == 0) { env->mstatus &= ~MSTATUS_SDT; } + + if (cfg->ext_sstc && + ((env->menvcfg & MENVCFG_STCE) != (val & MENVCFG_STCE))) { + stce_changed = true; + } } env->menvcfg = (env->menvcfg & ~mask) | (val & mask); - write_henvcfg(env, CSR_HENVCFG, env->henvcfg); - return RISCV_EXCP_NONE; + if (stce_changed) { + riscv_timer_stce_changed(env, true, !!(val & MENVCFG_STCE)); + } + + return write_henvcfg(env, CSR_HENVCFG, env->henvcfg, ra); } static RISCVException read_menvcfgh(CPURISCVState *env, int csrno, @@ -3201,9 +3233,9 @@ static RISCVException read_menvcfgh(CPURISCVState *env, int csrno, } static RISCVException write_henvcfgh(CPURISCVState *env, int csrno, - target_ulong val); + target_ulong val, uintptr_t ra); static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | @@ -3212,15 +3244,24 @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) | (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0); uint64_t valh = (uint64_t)val << 32; + bool stce_changed = false; + + if (cfg->ext_sstc && + ((env->menvcfg & MENVCFG_STCE) != (valh & MENVCFG_STCE))) { + stce_changed = true; + } if ((valh & MENVCFG_DTE) == 0) { env->mstatus &= ~MSTATUS_SDT; } env->menvcfg = (env->menvcfg & ~mask) | (valh & mask); - write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32); - return RISCV_EXCP_NONE; + if (stce_changed) { + riscv_timer_stce_changed(env, true, !!(valh & MENVCFG_STCE)); + } + + return write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32, ra); } static RISCVException read_senvcfg(CPURISCVState *env, int csrno, @@ -3238,7 +3279,7 @@ static RISCVException read_senvcfg(CPURISCVState *env, int csrno, } static RISCVException write_senvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE; RISCVException ret; @@ -3295,10 +3336,12 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno, } static RISCVException write_henvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { + const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE; RISCVException ret; + bool stce_changed = false; ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); if (ret != RISCV_EXCP_NONE) { @@ -3324,6 +3367,11 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno, get_field(val, HENVCFG_PMM) != PMM_FIELD_RESERVED) { mask |= HENVCFG_PMM; } + + if (cfg->ext_sstc && + ((env->henvcfg & HENVCFG_STCE) != (val & HENVCFG_STCE))) { + stce_changed = true; + } } env->henvcfg = val & mask; @@ -3331,6 +3379,10 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno, env->vsstatus &= ~MSTATUS_SDT; } + if (stce_changed) { + riscv_timer_stce_changed(env, false, !!(val & HENVCFG_STCE)); + } + return RISCV_EXCP_NONE; } @@ -3350,21 +3402,34 @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno, } static RISCVException write_henvcfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { + const RISCVCPUConfig *cfg = riscv_cpu_cfg(env); uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE | HENVCFG_DTE); 
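/*
 * The STCE bookkeeping that this series adds to the envcfg writers
 * follows a single pattern: decide whether the guest-visible STCE bit
 * is about to flip before the write is masked in, commit the write,
 * and notify the timer layer last so that it observes the settled
 * register value. A minimal sketch of the pattern in isolation
 * (notify_stce() is a hypothetical stand-in for the real
 * riscv_timer_stce_changed() hook):
 *
 *   void write_envcfg_sketch(uint64_t *reg, uint64_t val, uint64_t wmask)
 *   {
 *       bool stce_changed = ((*reg ^ val) & STCE_BIT) != 0;
 *
 *       *reg = (*reg & ~wmask) | (val & wmask);
 *
 *       if (stce_changed) {
 *           notify_stce((*reg & STCE_BIT) != 0);
 *       }
 *   }
 *
 * The real writers additionally gate the comparison on cfg->ext_sstc,
 * and the RV32 "h" variants compare against the shifted high word
 * (valh) rather than the raw written value.
 */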
uint64_t valh = (uint64_t)val << 32; RISCVException ret; + bool stce_changed = false; ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG); if (ret != RISCV_EXCP_NONE) { return ret; } + + if (cfg->ext_sstc && + ((env->henvcfg & HENVCFG_STCE) != (valh & HENVCFG_STCE))) { + stce_changed = true; + } + env->henvcfg = (env->henvcfg & 0xFFFFFFFF) | (valh & mask); if ((env->henvcfg & HENVCFG_DTE) == 0) { env->vsstatus &= ~MSTATUS_SDT; } + + if (stce_changed) { + riscv_timer_stce_changed(env, false, !!(val & HENVCFG_STCE)); + } + return RISCV_EXCP_NONE; } @@ -3388,7 +3453,7 @@ static RISCVException write_mstateen(CPURISCVState *env, int csrno, } static RISCVException write_mstateen0(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; if (!riscv_has_ext(env, RVF)) { @@ -3420,7 +3485,7 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno, } static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -3447,7 +3512,7 @@ static RISCVException write_mstateenh(CPURISCVState *env, int csrno, } static RISCVException write_mstateen0h(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; @@ -3463,7 +3528,7 @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno, } static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -3492,7 +3557,7 @@ static RISCVException write_hstateen(CPURISCVState *env, int csrno, } static RISCVException write_hstateen0(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; @@ -3521,7 +3586,7 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno, } static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -3552,7 +3617,7 @@ static RISCVException write_hstateenh(CPURISCVState *env, int csrno, } static RISCVException write_hstateen0h(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; @@ -3564,7 +3629,7 @@ static RISCVException write_hstateen0h(CPURISCVState *env, int csrno, } static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -3603,7 +3668,7 @@ static RISCVException write_sstateen(CPURISCVState *env, int csrno, } static RISCVException write_sstateen0(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; @@ -3615,7 +3680,7 @@ static RISCVException write_sstateen0(CPURISCVState *env, int csrno, } static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val, uintptr_t ra) { return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -3635,7 +3700,14 @@ static RISCVException 
rmw_mip64(CPURISCVState *env, int csrno, if (riscv_cpu_cfg(env)->ext_sstc && (env->priv == PRV_M) && get_field(env->menvcfg, MENVCFG_STCE)) { /* sstc extension forbids STIP & VSTIP to be writeable in mip */ - mask = mask & ~(MIP_STIP | MIP_VSTIP); + + /* STIP is not writable when menvcfg.STCE is enabled. */ + mask = mask & ~MIP_STIP; + + /* VSTIP is not writable when both [mh]envcfg.STCE are enabled. */ + if (get_field(env->henvcfg, HENVCFG_STCE)) { + mask = mask & ~MIP_VSTIP; + } } if (mask) { @@ -3866,7 +3938,7 @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno, } static RISCVException write_sstatus(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { target_ulong mask = (sstatus_v1_10_mask); @@ -3883,7 +3955,7 @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno, mask |= SSTATUS_SDT; } target_ulong newval = (env->mstatus & ~mask) | (val & mask); - return write_mstatus(env, CSR_MSTATUS, newval); + return write_mstatus(env, CSR_MSTATUS, newval, ra); } static RISCVException rmw_vsie64(CPURISCVState *env, int csrno, @@ -4035,7 +4107,7 @@ static RISCVException read_stvec(CPURISCVState *env, int csrno, } static RISCVException write_stvec(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ if ((val & 3) < 2) { @@ -4054,7 +4126,7 @@ static RISCVException read_scounteren(CPURISCVState *env, int csrno, } static RISCVException write_scounteren(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { RISCVCPU *cpu = env_archcpu(env); @@ -4088,7 +4160,7 @@ static RISCVException read_sscratch(CPURISCVState *env, int csrno, } static RISCVException write_sscratch(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->sscratch = val; return RISCV_EXCP_NONE; @@ -4097,14 +4169,14 @@ static RISCVException write_sscratch(CPURISCVState *env, int csrno, static RISCVException read_sepc(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env->sepc; + *val = env->sepc & get_xepc_mask(env); return RISCV_EXCP_NONE; } static RISCVException write_sepc(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { - env->sepc = val; + env->sepc = val & get_xepc_mask(env); return RISCV_EXCP_NONE; } @@ -4116,7 +4188,7 @@ static RISCVException read_scause(CPURISCVState *env, int csrno, } static RISCVException write_scause(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->scause = val; return RISCV_EXCP_NONE; @@ -4130,7 +4202,7 @@ static RISCVException read_stval(CPURISCVState *env, int csrno, } static RISCVException write_stval(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->stval = val; return RISCV_EXCP_NONE; @@ -4270,7 +4342,7 @@ static RISCVException read_satp(CPURISCVState *env, int csrno, } static RISCVException write_satp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (!riscv_cpu_cfg(env)->mmu) { return RISCV_EXCP_NONE; @@ -4492,7 +4564,7 @@ static RISCVException read_hstatus(CPURISCVState *env, int csrno, } static RISCVException write_hstatus(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t mask = (target_ulong)-1; if (!env_archcpu(env)->cfg.ext_svukte) { @@ -4524,7 +4596,7 @@ static RISCVException read_hedeleg(CPURISCVState *env, int csrno, } static RISCVException 
write_hedeleg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->hedeleg = val & vs_delegable_excps; return RISCV_EXCP_NONE; @@ -4545,7 +4617,7 @@ static RISCVException read_hedelegh(CPURISCVState *env, int csrno, } static RISCVException write_hedelegh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { RISCVException ret; ret = smstateen_acc_ok(env, 0, SMSTATEEN0_P1P13); @@ -4808,7 +4880,7 @@ static RISCVException read_hcounteren(CPURISCVState *env, int csrno, } static RISCVException write_hcounteren(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { RISCVCPU *cpu = env_archcpu(env); @@ -4828,7 +4900,7 @@ static RISCVException read_hgeie(CPURISCVState *env, int csrno, } static RISCVException write_hgeie(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { /* Only GEILEN:1 bits implemented and BIT0 is never implemented */ val &= ((((target_ulong)1) << env->geilen) - 1) << 1; @@ -4847,7 +4919,7 @@ static RISCVException read_htval(CPURISCVState *env, int csrno, } static RISCVException write_htval(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->htval = val; return RISCV_EXCP_NONE; @@ -4861,7 +4933,7 @@ static RISCVException read_htinst(CPURISCVState *env, int csrno, } static RISCVException write_htinst(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { return RISCV_EXCP_NONE; } @@ -4883,7 +4955,7 @@ static RISCVException read_hgatp(CPURISCVState *env, int csrno, } static RISCVException write_hgatp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->hgatp = legalize_xatp(env, env->hgatp, val); return RISCV_EXCP_NONE; @@ -4901,7 +4973,7 @@ static RISCVException read_htimedelta(CPURISCVState *env, int csrno, } static RISCVException write_htimedelta(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (!env->rdtime_fn) { return RISCV_EXCP_ILLEGAL_INST; @@ -4933,7 +5005,7 @@ static RISCVException read_htimedeltah(CPURISCVState *env, int csrno, } static RISCVException write_htimedeltah(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (!env->rdtime_fn) { return RISCV_EXCP_ILLEGAL_INST; @@ -4957,7 +5029,7 @@ static RISCVException read_hvictl(CPURISCVState *env, int csrno, } static RISCVException write_hvictl(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->hvictl = val & HVICTL_VALID_MASK; return RISCV_EXCP_NONE; @@ -5022,7 +5094,7 @@ static RISCVException read_hviprio1(CPURISCVState *env, int csrno, } static RISCVException write_hviprio1(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { return write_hvipriox(env, 0, env->hviprio, val); } @@ -5034,7 +5106,7 @@ static RISCVException read_hviprio1h(CPURISCVState *env, int csrno, } static RISCVException write_hviprio1h(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { return write_hvipriox(env, 4, env->hviprio, val); } @@ -5046,7 +5118,7 @@ static RISCVException read_hviprio2(CPURISCVState *env, int csrno, } static RISCVException write_hviprio2(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { return write_hvipriox(env, 8, env->hviprio, val); } @@ -5058,7 +5130,7 @@ static RISCVException read_hviprio2h(CPURISCVState *env, int csrno, } static RISCVException 
write_hviprio2h(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { return write_hvipriox(env, 12, env->hviprio, val); } @@ -5072,7 +5144,7 @@ static RISCVException read_vsstatus(CPURISCVState *env, int csrno, } static RISCVException write_vsstatus(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint64_t mask = (target_ulong)-1; if ((val & VSSTATUS64_UXL) == 0) { @@ -5097,7 +5169,7 @@ static RISCVException read_vstvec(CPURISCVState *env, int csrno, } static RISCVException write_vstvec(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ if ((val & 3) < 2) { @@ -5116,7 +5188,7 @@ static RISCVException read_vsscratch(CPURISCVState *env, int csrno, } static RISCVException write_vsscratch(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->vsscratch = val; return RISCV_EXCP_NONE; @@ -5130,7 +5202,7 @@ static RISCVException read_vsepc(CPURISCVState *env, int csrno, } static RISCVException write_vsepc(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->vsepc = val; return RISCV_EXCP_NONE; @@ -5144,7 +5216,7 @@ static RISCVException read_vscause(CPURISCVState *env, int csrno, } static RISCVException write_vscause(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->vscause = val; return RISCV_EXCP_NONE; @@ -5158,7 +5230,7 @@ static RISCVException read_vstval(CPURISCVState *env, int csrno, } static RISCVException write_vstval(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->vstval = val; return RISCV_EXCP_NONE; @@ -5172,7 +5244,7 @@ static RISCVException read_vsatp(CPURISCVState *env, int csrno, } static RISCVException write_vsatp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->vsatp = legalize_xatp(env, env->vsatp, val); return RISCV_EXCP_NONE; @@ -5186,7 +5258,7 @@ static RISCVException read_mtval2(CPURISCVState *env, int csrno, } static RISCVException write_mtval2(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->mtval2 = val; return RISCV_EXCP_NONE; @@ -5200,7 +5272,7 @@ static RISCVException read_mtinst(CPURISCVState *env, int csrno, } static RISCVException write_mtinst(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->mtinst = val; return RISCV_EXCP_NONE; @@ -5215,7 +5287,7 @@ static RISCVException read_mseccfg(CPURISCVState *env, int csrno, } static RISCVException write_mseccfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { mseccfg_csr_write(env, val); return RISCV_EXCP_NONE; @@ -5231,7 +5303,7 @@ static RISCVException read_pmpcfg(CPURISCVState *env, int csrno, } static RISCVException write_pmpcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { uint32_t reg_index = csrno - CSR_PMPCFG0; @@ -5247,7 +5319,7 @@ static RISCVException read_pmpaddr(CPURISCVState *env, int csrno, } static RISCVException write_pmpaddr(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val); return RISCV_EXCP_NONE; @@ -5261,7 +5333,7 @@ static RISCVException read_tselect(CPURISCVState *env, int csrno, } static RISCVException write_tselect(CPURISCVState *env, int csrno, - target_ulong val) + 
target_ulong val, uintptr_t ra) { tselect_csr_write(env, val); return RISCV_EXCP_NONE; @@ -5285,7 +5357,7 @@ static RISCVException read_tdata(CPURISCVState *env, int csrno, } static RISCVException write_tdata(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { if (!tdata_available(env, csrno - CSR_TDATA1)) { return RISCV_EXCP_ILLEGAL_INST; @@ -5310,7 +5382,7 @@ static RISCVException read_mcontext(CPURISCVState *env, int csrno, } static RISCVException write_mcontext(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false; int32_t mask; @@ -5334,43 +5406,50 @@ static RISCVException read_mnscratch(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static int write_mnscratch(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mnscratch(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) { env->mnscratch = val; return RISCV_EXCP_NONE; } -static int read_mnepc(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mnepc(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mnepc; return RISCV_EXCP_NONE; } -static int write_mnepc(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mnepc(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) { env->mnepc = val; return RISCV_EXCP_NONE; } -static int read_mncause(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mncause(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mncause; return RISCV_EXCP_NONE; } -static int write_mncause(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mncause(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) { env->mncause = val; return RISCV_EXCP_NONE; } -static int read_mnstatus(CPURISCVState *env, int csrno, target_ulong *val) +static RISCVException read_mnstatus(CPURISCVState *env, int csrno, + target_ulong *val) { *val = env->mnstatus; return RISCV_EXCP_NONE; } -static int write_mnstatus(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException write_mnstatus(CPURISCVState *env, int csrno, + target_ulong val, uintptr_t ra) { target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP); @@ -5510,7 +5589,8 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env, static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, - target_ulong write_mask) + target_ulong write_mask, + uintptr_t ra) { RISCVException ret; target_ulong old_value = 0; @@ -5540,7 +5620,7 @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno, if (write_mask) { new_value = (old_value & ~write_mask) | (new_value & write_mask); if (csr_ops[csrno].write) { - ret = csr_ops[csrno].write(env, csrno, new_value); + ret = csr_ops[csrno].write(env, csrno, new_value, ra); if (ret != RISCV_EXCP_NONE) { return ret; } @@ -5563,25 +5643,25 @@ RISCVException riscv_csrr(CPURISCVState *env, int csrno, return ret; } - return riscv_csrrw_do64(env, csrno, ret_value, 0, 0); + return riscv_csrrw_do64(env, csrno, ret_value, 0, 0, 0); } RISCVException riscv_csrrw(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) + target_ulong *ret_value, target_ulong new_value, + target_ulong write_mask, uintptr_t ra) { RISCVException ret = riscv_csrrw_check(env, csrno, true); if (ret != RISCV_EXCP_NONE) { return ret; } 
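/*
 * The uintptr_t ra threaded through riscv_csrrw() and every write
 * handler is the host return address captured at the TCG helper
 * boundary; it is what lets a deep callee such as write_misa() recover
 * the guest PC through cpu_unwind_state_data(). Callers with no
 * translated-code frame behind them (riscv_csr_write(), the gdbstub
 * and debugger paths) pass 0. A sketch of the capturing side, using a
 * hypothetical helper name; GETPC() must be invoked in the outermost
 * helper frame:
 *
 *   void helper_csrw_sketch(CPURISCVState *env, int csrno, target_ulong val)
 *   {
 *       uintptr_t ra = GETPC();
 *
 *       riscv_csrrw(env, csrno, NULL, val,
 *                   MAKE_64BIT_MASK(0, TARGET_LONG_BITS), ra);
 *   }
 */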
- return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask); + return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask, ra); } static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno, Int128 *ret_value, Int128 new_value, - Int128 write_mask) + Int128 write_mask, uintptr_t ra) { RISCVException ret; Int128 old_value; @@ -5603,7 +5683,7 @@ static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno, } } else if (csr_ops[csrno].write) { /* avoids having to write wrappers for all registers */ - ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value)); + ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value), ra); if (ret != RISCV_EXCP_NONE) { return ret; } @@ -5630,7 +5710,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno, if (csr_ops[csrno].read128) { return riscv_csrrw_do128(env, csrno, ret_value, - int128_zero(), int128_zero()); + int128_zero(), int128_zero(), 0); } /* @@ -5641,9 +5721,7 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno, * accesses */ target_ulong old_value; - ret = riscv_csrrw_do64(env, csrno, &old_value, - (target_ulong)0, - (target_ulong)0); + ret = riscv_csrrw_do64(env, csrno, &old_value, 0, 0, 0); if (ret == RISCV_EXCP_NONE && ret_value) { *ret_value = int128_make64(old_value); } @@ -5651,8 +5729,8 @@ RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno, } RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, - Int128 *ret_value, - Int128 new_value, Int128 write_mask) + Int128 *ret_value, Int128 new_value, + Int128 write_mask, uintptr_t ra) { RISCVException ret; @@ -5662,7 +5740,8 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, } if (csr_ops[csrno].read128) { - return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask); + return riscv_csrrw_do128(env, csrno, ret_value, + new_value, write_mask, ra); } /* @@ -5675,7 +5754,7 @@ RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, target_ulong old_value; ret = riscv_csrrw_do64(env, csrno, &old_value, int128_getlo(new_value), - int128_getlo(write_mask)); + int128_getlo(write_mask), ra); if (ret == RISCV_EXCP_NONE && ret_value) { *ret_value = int128_make64(old_value); } @@ -5698,7 +5777,7 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, if (!write_mask) { ret = riscv_csrr(env, csrno, ret_value); } else { - ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask); + ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask, 0); } #if !defined(CONFIG_USER_ONLY) env->debugger = false; @@ -5714,7 +5793,7 @@ static RISCVException read_jvt(CPURISCVState *env, int csrno, } static RISCVException write_jvt(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val, uintptr_t ra) { env->jvt = val; return RISCV_EXCP_NONE; @@ -6088,6 +6167,30 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg }, [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg }, [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg }, + [CSR_PMPCFG4] = { "pmpcfg4", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG5] = { "pmpcfg5", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG6] = { "pmpcfg6", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG7] = { "pmpcfg7", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG8] = { "pmpcfg8", pmp, read_pmpcfg, write_pmpcfg, 
+ .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG9] = { "pmpcfg9", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG10] = { "pmpcfg10", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG11] = { "pmpcfg11", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG12] = { "pmpcfg12", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG13] = { "pmpcfg13", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG14] = { "pmpcfg14", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPCFG15] = { "pmpcfg15", pmp, read_pmpcfg, write_pmpcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr }, @@ -6102,8 +6205,104 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr }, - [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr }, - [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr }, + [CSR_PMPADDR16] = { "pmpaddr16", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR17] = { "pmpaddr17", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR18] = { "pmpaddr18", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR19] = { "pmpaddr19", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR20] = { "pmpaddr20", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR21] = { "pmpaddr21", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR22] = { "pmpaddr22", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR23] = { "pmpaddr23", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR24] = { "pmpaddr24", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR25] = { "pmpaddr25", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR26] = { "pmpaddr26", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR27] = { "pmpaddr27", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR28] = { "pmpaddr28", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR29] = { "pmpaddr29", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR30] = { "pmpaddr30", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR31] = { "pmpaddr31", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR32] = { "pmpaddr32", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR33] = { "pmpaddr33", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR34] = { 
"pmpaddr34", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR35] = { "pmpaddr35", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR36] = { "pmpaddr36", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR37] = { "pmpaddr37", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR38] = { "pmpaddr38", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR39] = { "pmpaddr39", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR40] = { "pmpaddr40", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR41] = { "pmpaddr41", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR42] = { "pmpaddr42", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR43] = { "pmpaddr43", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR44] = { "pmpaddr44", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR45] = { "pmpaddr45", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR46] = { "pmpaddr46", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR47] = { "pmpaddr47", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR48] = { "pmpaddr48", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR49] = { "pmpaddr49", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR50] = { "pmpaddr50", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR51] = { "pmpaddr51", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR52] = { "pmpaddr52", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR53] = { "pmpaddr53", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR54] = { "pmpaddr54", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR55] = { "pmpaddr55", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR56] = { "pmpaddr56", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR57] = { "pmpaddr57", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR58] = { "pmpaddr58", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR59] = { "pmpaddr59", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR60] = { "pmpaddr60", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR61] = { "pmpaddr61", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR62] = { "pmpaddr62", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_PMPADDR63] = { "pmpaddr63", pmp, read_pmpaddr, write_pmpaddr, + .min_priv_ver = PRIV_VERSION_1_12_0 }, /* Debug CSRs */ [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect }, diff --git a/target/riscv/debug.c b/target/riscv/debug.c index 198d051..5664466 100644 --- a/target/riscv/debug.c +++ b/target/riscv/debug.c @@ -28,9 +28,10 @@ #include "qapi/error.h" #include 
"cpu.h" #include "trace.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" +#include "exec/watchpoint.h" #include "system/cpu-timers.h" +#include "exec/icount.h" /* * The following M-mode trigger CSRs are implemented: diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c index 91b1a56..af40561 100644 --- a/target/riscv/fpu_helper.c +++ b/target/riscv/fpu_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" #include "qemu/host-utils.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" #include "internals.h" @@ -756,6 +755,6 @@ uint64_t helper_fcvt_bf16_s(CPURISCVState *env, uint64_t rs1) uint64_t helper_fcvt_s_bf16(CPURISCVState *env, uint64_t rs1) { - float16 frs1 = check_nanbox_h(env, rs1); + float16 frs1 = check_nanbox_bf16(env, rs1); return nanbox_s(env, bfloat16_to_float32(frs1, &env->fp_status)); } diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c index 18e88f4..1934f91 100644 --- a/target/riscv/gdbstub.c +++ b/target/riscv/gdbstub.c @@ -62,7 +62,7 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) return 0; } - switch (mcc->misa_mxl_max) { + switch (mcc->def->misa_mxl_max) { case MXL_RV32: return gdb_get_reg32(mem_buf, tmp); case MXL_RV64: @@ -82,7 +82,7 @@ int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) int length = 0; target_ulong tmp; - switch (mcc->misa_mxl_max) { + switch (mcc->def->misa_mxl_max) { case MXL_RV32: tmp = (int32_t)ldl_p(mem_buf); length = 4; @@ -359,7 +359,7 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) ricsv_gen_dynamic_vector_feature(cs, cs->gdb_num_regs), 0); } - switch (mcc->misa_mxl_max) { + switch (mcc->def->misa_mxl_max) { case MXL_RV32: gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual, diff --git a/target/riscv/helper.h b/target/riscv/helper.h index 85d73e4..f712b1c 100644 --- a/target/riscv/helper.h +++ b/target/riscv/helper.h @@ -159,7 +159,7 @@ DEF_HELPER_FLAGS_3(hyp_hsv_d, TCG_CALL_NO_WG, void, env, tl, tl) #endif /* Vector functions */ -DEF_HELPER_3(vsetvl, tl, env, tl, tl) +DEF_HELPER_4(vsetvl, tl, env, tl, tl, tl) DEF_HELPER_5(vle8_v, void, ptr, ptr, tl, env, i32) DEF_HELPER_5(vle16_v, void, ptr, ptr, tl, env, i32) DEF_HELPER_5(vle32_v, void, ptr, ptr, tl, env, i32) diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index 6d1a13c..cd23b1f 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -703,14 +703,14 @@ vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm # Vector widening ordered and unordered float reduction sum vfwredusum_vs 110001 . ..... ..... 001 ..... 1010111 @r_vm vfwredosum_vs 110011 . ..... ..... 001 ..... 1010111 @r_vm -vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r -vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r -vmandn_mm 011000 - ..... ..... 010 ..... 1010111 @r -vmxor_mm 011011 - ..... ..... 010 ..... 1010111 @r -vmor_mm 011010 - ..... ..... 010 ..... 1010111 @r -vmnor_mm 011110 - ..... ..... 010 ..... 1010111 @r -vmorn_mm 011100 - ..... ..... 010 ..... 1010111 @r -vmxnor_mm 011111 - ..... ..... 010 ..... 1010111 @r +vmand_mm 011001 1 ..... ..... 010 ..... 1010111 @r +vmnand_mm 011101 1 ..... ..... 010 ..... 1010111 @r +vmandn_mm 011000 1 ..... ..... 010 ..... 1010111 @r +vmxor_mm 011011 1 ..... ..... 010 ..... 1010111 @r +vmor_mm 011010 1 ..... ..... 010 ..... 1010111 @r +vmnor_mm 011110 1 ..... ..... 010 ..... 1010111 @r +vmorn_mm 011100 1 ..... ..... 010 ..... 1010111 @r +vmxnor_mm 011111 1 ..... 
..... 010 ..... 1010111 @r vcpop_m 010000 . ..... 10000 010 ..... 1010111 @r2_vm vfirst_m 010000 . ..... 10001 010 ..... 1010111 @r2_vm vmsbf_m 010100 . ..... 00001 010 ..... 1010111 @r2_vm @@ -732,7 +732,7 @@ vrgather_vv 001100 . ..... ..... 000 ..... 1010111 @r_vm vrgatherei16_vv 001110 . ..... ..... 000 ..... 1010111 @r_vm vrgather_vx 001100 . ..... ..... 100 ..... 1010111 @r_vm vrgather_vi 001100 . ..... ..... 011 ..... 1010111 @r_vm -vcompress_vm 010111 - ..... ..... 010 ..... 1010111 @r +vcompress_vm 010111 1 ..... ..... 010 ..... 1010111 @r vmv1r_v 100111 1 ..... 00000 011 ..... 1010111 @r2rd vmv2r_v 100111 1 ..... 00001 011 ..... 1010111 @r2rd vmv4r_v 100111 1 ..... 00011 011 ..... 1010111 @r2rd diff --git a/target/riscv/insn_trans/trans_rvbf16.c.inc b/target/riscv/insn_trans/trans_rvbf16.c.inc index 0a9cd1e..066dc36 100644 --- a/target/riscv/insn_trans/trans_rvbf16.c.inc +++ b/target/riscv/insn_trans/trans_rvbf16.c.inc @@ -119,8 +119,11 @@ static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a) REQUIRE_FPU; REQUIRE_ZVFBFWMA(ctx); + uint8_t sew = ctx->sew; if (require_rvv(ctx) && vext_check_isa_ill(ctx) && (ctx->sew == MO_16) && - vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm)) { + vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm) && + vext_check_input_eew(ctx, a->rd, sew + 1, a->rs1, sew, a->vm) && + vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) { uint32_t data = 0; gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN); @@ -146,8 +149,10 @@ static bool trans_vfwmaccbf16_vf(DisasContext *ctx, arg_vfwmaccbf16_vf *a) REQUIRE_FPU; REQUIRE_ZVFBFWMA(ctx); + uint8_t sew = ctx->sew; if (require_rvv(ctx) && (ctx->sew == MO_16) && vext_check_isa_ill(ctx) && - vext_check_ds(ctx, a->rd, a->rs2, a->vm)) { + vext_check_ds(ctx, a->rd, a->rs2, a->vm) && + vext_check_input_eew(ctx, a->rd, sew + 1, a->rs2, sew, a->vm)) { uint32_t data = 0; gen_set_rm(ctx, RISCV_FRM_DYN); diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index b9883a5..610bf9f 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -100,10 +100,33 @@ static bool require_scale_rvfmin(DisasContext *s) } } -/* Destination vector register group cannot overlap source mask register. */ -static bool require_vm(int vm, int vd) +/* + * Source and destination vector register groups cannot overlap source mask + * register: + * + * A vector register cannot be used to provide source operands with more than + * one EEW for a single instruction. A mask register source is considered to + * have EEW=1 for this constraint. An encoding that would result in the same + * vector register being read with two or more different EEWs, including when + * the vector register appears at different positions within two or more vector + * register groups, is reserved. + * (Section 5.2) + * + * A destination vector register group can overlap a source vector + * register group only if one of the following holds: + * 1. The destination EEW equals the source EEW. + * 2. The destination EEW is smaller than the source EEW and the overlap + * is in the lowest-numbered part of the source register group. + * 3. The destination EEW is greater than the source EEW, the source EMUL + * is at least 1, and the overlap is in the highest-numbered part of + * the destination register group. + * For the purpose of determining register group overlap constraints, mask + * elements have EEW=1. 
+ * (Section 5.2) + */ +static bool require_vm(int vm, int v) { - return (vm != 0 || vd != 0); + return (vm != 0 || v != 0); } static bool require_nf(int vd, int nf, int lmul) @@ -179,7 +202,7 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2) s1 = get_gpr(s, rs1, EXT_ZERO); } - gen_helper_vsetvl(dst, tcg_env, s1, s2); + gen_helper_vsetvl(dst, tcg_env, s1, s2, tcg_constant_tl((int) (rd == 0 && rs1 == 0))); gen_set_gpr(s, rd, dst); finalize_rvv_inst(s); @@ -199,7 +222,7 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2) dst = dest_gpr(s, rd); - gen_helper_vsetvl(dst, tcg_env, s1, s2); + gen_helper_vsetvl(dst, tcg_env, s1, s2, tcg_constant_tl(0)); gen_set_gpr(s, rd, dst); finalize_rvv_inst(s); gen_update_pc(s, s->cur_insn_len); @@ -356,11 +379,41 @@ static bool vext_check_ld_index(DisasContext *s, int vd, int vs2, return ret; } +/* + * Check whether a vector register is used to provide source operands with + * more than one EEW for the vector instruction. + * Returns true if the instruction has a valid encoding. + * Returns false if the encoding violates the mismatched input EEWs constraint. + */ +static bool vext_check_input_eew(DisasContext *s, int vs1, uint8_t eew_vs1, + int vs2, uint8_t eew_vs2, int vm) +{ + bool is_valid = true; + int8_t emul_vs1 = eew_vs1 - s->sew + s->lmul; + int8_t emul_vs2 = eew_vs2 - s->sew + s->lmul; + + /* When vm is 0, the vs1 & vs2 (EEW != 1) groups can't overlap v0 (EEW = 1) */ + if ((vs1 != -1 && !require_vm(vm, vs1)) || + (vs2 != -1 && !require_vm(vm, vs2))) { + is_valid = false; + } + + /* When eew_vs1 != eew_vs2, check whether vs1 and vs2 overlap */ + if ((vs1 != -1 && vs2 != -1) && (eew_vs1 != eew_vs2) && + is_overlapped(vs1, 1 << MAX(emul_vs1, 0), + vs2, 1 << MAX(emul_vs2, 0))) { + is_valid = false; + } + + return is_valid; +} + static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm) { return require_vm(vm, vd) && require_align(vd, s->lmul) && - require_align(vs, s->lmul); + require_align(vs, s->lmul) && + vext_check_input_eew(s, vs, s->sew, -1, s->sew, vm); } /* @@ -379,6 +432,7 @@ static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm) static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm) { return vext_check_ss(s, vd, vs2, vm) && + vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) && require_align(vs1, s->lmul); } @@ -474,6 +528,7 @@ static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2, static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm) { return vext_wide_check_common(s, vd, vm) && + vext_check_input_eew(s, vs, s->sew, -1, 0, vm) && require_align(vs, s->lmul) && require_noover(vd, s->lmul + 1, vs, s->lmul); } @@ -481,6 +536,7 @@ static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm) static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm) { return vext_wide_check_common(s, vd, vm) && + vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm) && require_align(vs, s->lmul + 1); } @@ -499,6 +555,7 @@ static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm) static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm) { return vext_check_ds(s, vd, vs2, vm) && + vext_check_input_eew(s, vs1, s->sew, vs2, s->sew, vm) && require_align(vs1, s->lmul) && require_noover(vd, s->lmul + 1, vs1, s->lmul); } @@ -521,12 +578,14 @@ static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm) static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm) { return vext_check_ds(s, vd, vs1, vm) 
&& + vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) && require_align(vs2, s->lmul + 1); } static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm) { - bool ret = vext_narrow_check_common(s, vd, vs, vm); + bool ret = vext_narrow_check_common(s, vd, vs, vm) && + vext_check_input_eew(s, vs, s->sew + 1, -1, 0, vm); if (vd != vs) { ret &= require_noover(vd, s->lmul, vs, s->lmul + 1); } @@ -549,6 +608,7 @@ static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm) static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm) { return vext_check_sd(s, vd, vs2, vm) && + vext_check_input_eew(s, vs1, s->sew, vs2, s->sew + 1, vm) && require_align(vs1, s->lmul); } @@ -584,7 +644,9 @@ static bool vext_check_slide(DisasContext *s, int vd, int vs2, { bool ret = require_align(vs2, s->lmul) && require_align(vd, s->lmul) && - require_vm(vm, vd); + require_vm(vm, vd) && + vext_check_input_eew(s, -1, 0, vs2, s->sew, vm); + if (is_over) { ret &= (vd != vs2); } @@ -802,32 +864,286 @@ GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check) GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check) /* - *** stride load and store + * MAXSZ returns the maximum vector size that can be operated on, in bytes, + * which is used in GVEC IR when the vl_eq_vlmax flag is set to true + * to accelerate vector operations. + */ +static inline uint32_t MAXSZ(DisasContext *s) +{ + int max_sz = s->cfg_ptr->vlenb << 3; + return max_sz >> (3 - s->lmul); +} + +static inline uint32_t get_log2(uint32_t a) +{ + uint32_t i = 0; + for (; a > 0;) { + a >>= 1; + i++; + } + return i; +} + +typedef void gen_tl_ldst(TCGv, TCGv_ptr, tcg_target_long); + +/* + * Simulate the strided load/store main loop: + * + * for (i = env->vstart; i < env->vl; env->vstart = ++i) { + * k = 0; + * while (k < nf) { + * if (!vm && !vext_elem_mask(v0, i)) { + * vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz, + * (i + k * max_elems + 1) * esz); + * k++; + * continue; + * } + * target_ulong addr = base + stride * i + (k << log2_esz); + * ldst(env, adjust_addr(env, addr), i + k * max_elems, vd, ra); + * k++; + * } + * } + */ +static void gen_ldst_stride_main_loop(DisasContext *s, TCGv dest, uint32_t rs1, + uint32_t rs2, uint32_t vm, uint32_t nf, + gen_tl_ldst *ld_fn, gen_tl_ldst *st_fn, + bool is_load) +{ + TCGv addr = tcg_temp_new(); + TCGv base = get_gpr(s, rs1, EXT_NONE); + TCGv stride = get_gpr(s, rs2, EXT_NONE); + + TCGv i = tcg_temp_new(); + TCGv i_esz = tcg_temp_new(); + TCGv k = tcg_temp_new(); + TCGv k_esz = tcg_temp_new(); + TCGv k_max = tcg_temp_new(); + TCGv mask = tcg_temp_new(); + TCGv mask_offs = tcg_temp_new(); + TCGv mask_offs_64 = tcg_temp_new(); + TCGv mask_elem = tcg_temp_new(); + TCGv mask_offs_rem = tcg_temp_new(); + TCGv vreg = tcg_temp_new(); + TCGv dest_offs = tcg_temp_new(); + TCGv stride_offs = tcg_temp_new(); + + uint32_t max_elems = MAXSZ(s) >> s->sew; + + TCGLabel *start = gen_new_label(); + TCGLabel *end = gen_new_label(); + TCGLabel *start_k = gen_new_label(); + TCGLabel *inc_k = gen_new_label(); + TCGLabel *end_k = gen_new_label(); + + MemOp atomicity = MO_ATOM_NONE; + if (s->sew == 0) { + atomicity = MO_ATOM_NONE; + } else { + atomicity = MO_ATOM_IFALIGN_PAIR; + } + + mark_vs_dirty(s); + + tcg_gen_addi_tl(mask, (TCGv)tcg_env, vreg_ofs(s, 0)); + + /* Start of outer loop. */ + tcg_gen_mov_tl(i, cpu_vstart); + gen_set_label(start); + tcg_gen_brcond_tl(TCG_COND_GE, i, cpu_vl, end); + tcg_gen_shli_tl(i_esz, i, s->sew); + /* Start of inner loop. 
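Each iteration of k handles one of the nf segment fields of element i: one EEW-wide access at base + stride * i + (k << log2_esz). 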
*/ + tcg_gen_movi_tl(k, 0); + gen_set_label(start_k); + tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end_k); + /* + * If we are in the mask-agnostic regime and the operation is masked, we + * set the inactive elements to 1. + */ + if (!vm && s->vma) { + TCGLabel *active_element = gen_new_label(); + /* (i + k * max_elems) * esz */ + tcg_gen_shli_tl(mask_offs, k, get_log2(max_elems << s->sew)); + tcg_gen_add_tl(mask_offs, mask_offs, i_esz); + + /* + * Check whether bit i of the mask is 0 or 1. + * + * static inline int vext_elem_mask(void *v0, int index) + * { + * int idx = index / 64; + * int pos = index % 64; + * return (((uint64_t *)v0)[idx] >> pos) & 1; + * } + */ + tcg_gen_shri_tl(mask_offs_64, mask_offs, 3); + tcg_gen_add_tl(mask_offs_64, mask_offs_64, mask); + tcg_gen_ld_i64((TCGv_i64)mask_elem, (TCGv_ptr)mask_offs_64, 0); + tcg_gen_rem_tl(mask_offs_rem, mask_offs, tcg_constant_tl(8)); + tcg_gen_shr_tl(mask_elem, mask_elem, mask_offs_rem); + tcg_gen_andi_tl(mask_elem, mask_elem, 1); + tcg_gen_brcond_tl(TCG_COND_NE, mask_elem, tcg_constant_tl(0), + active_element); + /* + * Set masked-off elements in the destination vector register to 1s. + * Store instructions simply skip this bit as memory ops access memory + * only for active elements. + */ + if (is_load) { + tcg_gen_shli_tl(mask_offs, mask_offs, s->sew); + tcg_gen_add_tl(mask_offs, mask_offs, dest); + st_fn(tcg_constant_tl(-1), (TCGv_ptr)mask_offs, 0); + } + tcg_gen_br(inc_k); + gen_set_label(active_element); + } + /* + * The element is active; calculate the address with the stride: + * target_ulong addr = base + stride * i + (k << log2_esz); + */ + tcg_gen_mul_tl(stride_offs, stride, i); + tcg_gen_shli_tl(k_esz, k, s->sew); + tcg_gen_add_tl(stride_offs, stride_offs, k_esz); + tcg_gen_add_tl(addr, base, stride_offs); + /* Calculate the offset in the dst/src vector register. */ + tcg_gen_shli_tl(k_max, k, get_log2(max_elems)); + tcg_gen_add_tl(dest_offs, i, k_max); + tcg_gen_shli_tl(dest_offs, dest_offs, s->sew); + tcg_gen_add_tl(dest_offs, dest_offs, dest); + if (is_load) { + tcg_gen_qemu_ld_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity); + st_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0); + } else { + ld_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0); + tcg_gen_qemu_st_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity); + } + /* + * We don't execute the load/store above if the element was inactive. + * Instead we jump directly to incrementing k and continue the loop. + */ + if (!vm && s->vma) { + gen_set_label(inc_k); + } + tcg_gen_addi_tl(k, k, 1); + tcg_gen_br(start_k); + /* End of the inner loop. */ + gen_set_label(end_k); + + tcg_gen_addi_tl(i, i, 1); + tcg_gen_mov_tl(cpu_vstart, i); + tcg_gen_br(start); + + /* End of the outer loop. 
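cpu_vstart has been kept in sync with i, so a trap taken inside the loop resumes at the first unprocessed element. 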
*/ + gen_set_label(end); + + return; +} + + +/* + * Set the tail bytes of the strided loads/stores to 1: + * + * for (k = 0; k < nf; ++k) { + * cnt = (k * max_elems + vl) * esz; + * tot = (k * max_elems + max_elems) * esz; + * for (i = cnt; i < tot; i += esz) { + * store_1s(-1, vd[vl+i]); + * } + * } + */ -typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv, - TCGv, TCGv_env, TCGv_i32); +static void gen_ldst_stride_tail_loop(DisasContext *s, TCGv dest, uint32_t nf, + gen_tl_ldst *st_fn) +{ + TCGv i = tcg_temp_new(); + TCGv k = tcg_temp_new(); + TCGv tail_cnt = tcg_temp_new(); + TCGv tail_tot = tcg_temp_new(); + TCGv tail_addr = tcg_temp_new(); + + TCGLabel *start = gen_new_label(); + TCGLabel *end = gen_new_label(); + TCGLabel *start_i = gen_new_label(); + TCGLabel *end_i = gen_new_label(); + + uint32_t max_elems_b = MAXSZ(s); + uint32_t esz = 1 << s->sew; + + /* Start of the outer loop. */ + tcg_gen_movi_tl(k, 0); + tcg_gen_shli_tl(tail_cnt, cpu_vl, s->sew); + tcg_gen_movi_tl(tail_tot, max_elems_b); + tcg_gen_add_tl(tail_addr, dest, tail_cnt); + gen_set_label(start); + tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end); + /* Start of the inner loop. */ + tcg_gen_mov_tl(i, tail_cnt); + gen_set_label(start_i); + tcg_gen_brcond_tl(TCG_COND_GE, i, tail_tot, end_i); + /* store_1s(-1, vd[vl+i]); */ + st_fn(tcg_constant_tl(-1), (TCGv_ptr)tail_addr, 0); + tcg_gen_addi_tl(tail_addr, tail_addr, esz); + tcg_gen_addi_tl(i, i, esz); + tcg_gen_br(start_i); + /* End of the inner loop. */ + gen_set_label(end_i); + /* Update the counts. */ + tcg_gen_addi_tl(tail_cnt, tail_cnt, max_elems_b); + tcg_gen_addi_tl(tail_tot, tail_cnt, max_elems_b); + tcg_gen_addi_tl(k, k, 1); + tcg_gen_br(start); + /* End of the outer loop. */ + gen_set_label(end); + + return; +} static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, - uint32_t data, gen_helper_ldst_stride *fn, - DisasContext *s) + uint32_t data, DisasContext *s, bool is_load) { - TCGv_ptr dest, mask; - TCGv base, stride; - TCGv_i32 desc; + if (!s->vstart_eq_zero) { + return false; + } - dest = tcg_temp_new_ptr(); - mask = tcg_temp_new_ptr(); - base = get_gpr(s, rs1, EXT_NONE); - stride = get_gpr(s, rs2, EXT_NONE); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, - s->cfg_ptr->vlenb, data)); + TCGv dest = tcg_temp_new(); - tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); - tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); + uint32_t nf = FIELD_EX32(data, VDATA, NF); + uint32_t vm = FIELD_EX32(data, VDATA, VM); + + /* Destination register and mask register */ + tcg_gen_addi_tl(dest, (TCGv)tcg_env, vreg_ofs(s, vd)); + + /* + * Select the appropriate load/store to retrieve data from the vector + * register given a specific sew. 
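+ * sew is log2 of the element size, so values 0-3 select the 8-, 16-, + * 32- and 64-bit accessors respectively. 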
+ */ + static gen_tl_ldst * const ld_fns[4] = { + tcg_gen_ld8u_tl, tcg_gen_ld16u_tl, + tcg_gen_ld32u_tl, tcg_gen_ld_tl + }; + + static gen_tl_ldst * const st_fns[4] = { + tcg_gen_st8_tl, tcg_gen_st16_tl, + tcg_gen_st32_tl, tcg_gen_st_tl + }; + + gen_tl_ldst *ld_fn = ld_fns[s->sew]; + gen_tl_ldst *st_fn = st_fns[s->sew]; + + if (ld_fn == NULL || st_fn == NULL) { + return false; + } mark_vs_dirty(s); - fn(dest, mask, base, stride, tcg_env, desc); + gen_ldst_stride_main_loop(s, dest, rs1, rs2, vm, nf, ld_fn, st_fn, is_load); + + tcg_gen_movi_tl(cpu_vstart, 0); + + /* + * Set the tail bytes to 1 if tail agnostic: + */ + if (s->vta != 0 && is_load) { + gen_ldst_stride_tail_loop(s, dest, nf, st_fn); + } finalize_rvv_inst(s); return true; @@ -836,16 +1152,6 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) { uint32_t data = 0; - gen_helper_ldst_stride *fn; - static gen_helper_ldst_stride * const fns[4] = { - gen_helper_vlse8_v, gen_helper_vlse16_v, - gen_helper_vlse32_v, gen_helper_vlse64_v - }; - - fn = fns[eew]; - if (fn == NULL) { - return false; - } uint8_t emul = vext_get_emul(s, eew); data = FIELD_DP32(data, VDATA, VM, a->vm); @@ -853,7 +1159,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) data = FIELD_DP32(data, VDATA, NF, a->nf); data = FIELD_DP32(data, VDATA, VTA, s->vta); data = FIELD_DP32(data, VDATA, VMA, s->vma); - return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s); + return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, true); } static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) @@ -871,23 +1177,13 @@ GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check) static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) { uint32_t data = 0; - gen_helper_ldst_stride *fn; - static gen_helper_ldst_stride * const fns[4] = { - /* masked stride store */ - gen_helper_vsse8_v, gen_helper_vsse16_v, - gen_helper_vsse32_v, gen_helper_vsse64_v - }; uint8_t emul = vext_get_emul(s, eew); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, emul); data = FIELD_DP32(data, VDATA, NF, a->nf); - fn = fns[eew]; - if (fn == NULL) { - return false; - } - return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s); + return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, false); } static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) @@ -981,7 +1277,8 @@ static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) { return require_rvv(s) && vext_check_isa_ill(s) && - vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew); + vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew) && + vext_check_input_eew(s, -1, 0, a->rs2, eew, a->vm); } GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check) @@ -1033,7 +1330,8 @@ static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) { return require_rvv(s) && vext_check_isa_ill(s) && - vext_check_st_index(s, a->rd, a->rs2, a->nf, eew); + vext_check_st_index(s, a->rd, a->rs2, a->nf, eew) && + vext_check_input_eew(s, a->rd, s->sew, a->rs2, eew, a->vm); } GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check) @@ -1063,6 +1361,12 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data, fn(dest, mask, base, tcg_env, desc); finalize_rvv_inst(s); + + /* vector unit-stride fault-only-first load may modify vl CSR */ + gen_update_pc(s, s->cur_insn_len); + lookup_and_goto_ptr(s); + s->base.is_jmp = 
DISAS_NORETURN; + return true; } @@ -1100,25 +1404,86 @@ GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check) typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32); static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf, - gen_helper_ldst_whole *fn, - DisasContext *s) + uint32_t log2_esz, gen_helper_ldst_whole *fn, + DisasContext *s, bool is_load) { - TCGv_ptr dest; - TCGv base; - TCGv_i32 desc; - - uint32_t data = FIELD_DP32(0, VDATA, NF, nf); - data = FIELD_DP32(data, VDATA, VM, 1); - dest = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, - s->cfg_ptr->vlenb, data)); - - base = get_gpr(s, rs1, EXT_NONE); - tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); - mark_vs_dirty(s); - fn(dest, base, tcg_env, desc); + /* + * Load/store multiple bytes per iteration. + * When possible do this atomically. + * Update vstart with the number of processed elements. + * Use the helper function if either: + * - vstart is not 0. + * - the target has 32 bit registers and we are loading/storing 64 bit long + * elements. This is to ensure that we process every element with a single + * memory instruction. + */ + + bool use_helper_fn = !(s->vstart_eq_zero) || + (TCG_TARGET_REG_BITS == 32 && log2_esz == 3); + + if (!use_helper_fn) { + TCGv addr = tcg_temp_new(); + uint32_t size = s->cfg_ptr->vlenb * nf; + TCGv_i64 t8 = tcg_temp_new_i64(); + TCGv_i32 t4 = tcg_temp_new_i32(); + MemOp atomicity = MO_ATOM_NONE; + if (log2_esz == 0) { + atomicity = MO_ATOM_NONE; + } else { + atomicity = MO_ATOM_IFALIGN_PAIR; + } + if (TCG_TARGET_REG_BITS == 64) { + for (int i = 0; i < size; i += 8) { + addr = get_address(s, rs1, i); + if (is_load) { + tcg_gen_qemu_ld_i64(t8, addr, s->mem_idx, + MO_LE | MO_64 | atomicity); + tcg_gen_st_i64(t8, tcg_env, vreg_ofs(s, vd) + i); + } else { + tcg_gen_ld_i64(t8, tcg_env, vreg_ofs(s, vd) + i); + tcg_gen_qemu_st_i64(t8, addr, s->mem_idx, + MO_LE | MO_64 | atomicity); + } + if (i == size - 8) { + tcg_gen_movi_tl(cpu_vstart, 0); + } else { + tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 8 >> log2_esz); + } + } + } else { + for (int i = 0; i < size; i += 4) { + addr = get_address(s, rs1, i); + if (is_load) { + tcg_gen_qemu_ld_i32(t4, addr, s->mem_idx, + MO_LE | MO_32 | atomicity); + tcg_gen_st_i32(t4, tcg_env, vreg_ofs(s, vd) + i); + } else { + tcg_gen_ld_i32(t4, tcg_env, vreg_ofs(s, vd) + i); + tcg_gen_qemu_st_i32(t4, addr, s->mem_idx, + MO_LE | MO_32 | atomicity); + } + if (i == size - 4) { + tcg_gen_movi_tl(cpu_vstart, 0); + } else { + tcg_gen_addi_tl(cpu_vstart, cpu_vstart, 4 >> log2_esz); + } + } + } + } else { + TCGv_ptr dest; + TCGv base; + TCGv_i32 desc; + uint32_t data = FIELD_DP32(0, VDATA, NF, nf); + data = FIELD_DP32(data, VDATA, VM, 1); + dest = tcg_temp_new_ptr(); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); + base = get_gpr(s, rs1, EXT_NONE); + tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); + fn(dest, base, tcg_env, desc); + } finalize_rvv_inst(s); return true; @@ -1128,58 +1493,47 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf, * load and store whole register instructions ignore vtype and vl setting. * Thus, we don't need to check vill bit. 
(Section 7.9) */ -#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF) \ -static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ -{ \ - if (require_rvv(s) && \ - QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \ - return ldst_whole_trans(a->rd, a->rs1, ARG_NF, \ - gen_helper_##NAME, s); \ - } \ - return false; \ -} - -GEN_LDST_WHOLE_TRANS(vl1re8_v, 1) -GEN_LDST_WHOLE_TRANS(vl1re16_v, 1) -GEN_LDST_WHOLE_TRANS(vl1re32_v, 1) -GEN_LDST_WHOLE_TRANS(vl1re64_v, 1) -GEN_LDST_WHOLE_TRANS(vl2re8_v, 2) -GEN_LDST_WHOLE_TRANS(vl2re16_v, 2) -GEN_LDST_WHOLE_TRANS(vl2re32_v, 2) -GEN_LDST_WHOLE_TRANS(vl2re64_v, 2) -GEN_LDST_WHOLE_TRANS(vl4re8_v, 4) -GEN_LDST_WHOLE_TRANS(vl4re16_v, 4) -GEN_LDST_WHOLE_TRANS(vl4re32_v, 4) -GEN_LDST_WHOLE_TRANS(vl4re64_v, 4) -GEN_LDST_WHOLE_TRANS(vl8re8_v, 8) -GEN_LDST_WHOLE_TRANS(vl8re16_v, 8) -GEN_LDST_WHOLE_TRANS(vl8re32_v, 8) -GEN_LDST_WHOLE_TRANS(vl8re64_v, 8) +#define GEN_LDST_WHOLE_TRANS(NAME, ETYPE, ARG_NF, IS_LOAD) \ +static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ +{ \ + if (require_rvv(s) && \ + QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \ + return ldst_whole_trans(a->rd, a->rs1, ARG_NF, ctzl(sizeof(ETYPE)), \ + gen_helper_##NAME, s, IS_LOAD); \ + } \ + return false; \ +} + +GEN_LDST_WHOLE_TRANS(vl1re8_v, int8_t, 1, true) +GEN_LDST_WHOLE_TRANS(vl1re16_v, int16_t, 1, true) +GEN_LDST_WHOLE_TRANS(vl1re32_v, int32_t, 1, true) +GEN_LDST_WHOLE_TRANS(vl1re64_v, int64_t, 1, true) +GEN_LDST_WHOLE_TRANS(vl2re8_v, int8_t, 2, true) +GEN_LDST_WHOLE_TRANS(vl2re16_v, int16_t, 2, true) +GEN_LDST_WHOLE_TRANS(vl2re32_v, int32_t, 2, true) +GEN_LDST_WHOLE_TRANS(vl2re64_v, int64_t, 2, true) +GEN_LDST_WHOLE_TRANS(vl4re8_v, int8_t, 4, true) +GEN_LDST_WHOLE_TRANS(vl4re16_v, int16_t, 4, true) +GEN_LDST_WHOLE_TRANS(vl4re32_v, int32_t, 4, true) +GEN_LDST_WHOLE_TRANS(vl4re64_v, int64_t, 4, true) +GEN_LDST_WHOLE_TRANS(vl8re8_v, int8_t, 8, true) +GEN_LDST_WHOLE_TRANS(vl8re16_v, int16_t, 8, true) +GEN_LDST_WHOLE_TRANS(vl8re32_v, int32_t, 8, true) +GEN_LDST_WHOLE_TRANS(vl8re64_v, int64_t, 8, true) /* * The vector whole register store instructions are encoded similar to * unmasked unit-stride store of elements with EEW=8. */ -GEN_LDST_WHOLE_TRANS(vs1r_v, 1) -GEN_LDST_WHOLE_TRANS(vs2r_v, 2) -GEN_LDST_WHOLE_TRANS(vs4r_v, 4) -GEN_LDST_WHOLE_TRANS(vs8r_v, 8) +GEN_LDST_WHOLE_TRANS(vs1r_v, int8_t, 1, false) +GEN_LDST_WHOLE_TRANS(vs2r_v, int8_t, 2, false) +GEN_LDST_WHOLE_TRANS(vs4r_v, int8_t, 4, false) +GEN_LDST_WHOLE_TRANS(vs8r_v, int8_t, 8, false) /* *** Vector Integer Arithmetic Instructions */ -/* - * MAXSZ returns the maximum vector size can be operated in bytes, - * which is used in GVEC IR when vl_eq_vlmax flag is set to true - * to accelerate vector operation. 
- */ -static inline uint32_t MAXSZ(DisasContext *s) -{ - int max_sz = s->cfg_ptr->vlenb * 8; - return max_sz >> (3 - s->lmul); -} - static bool opivv_check(DisasContext *s, arg_rmrr *a) { return require_rvv(s) && @@ -1475,6 +1829,16 @@ static bool opivv_widen_check(DisasContext *s, arg_rmrr *a) vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm); } +/* OPIVV with overwrite and WIDEN */ +static bool opivv_overwrite_widen_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm); +} + static bool do_opivv_widen(DisasContext *s, arg_rmrr *a, gen_helper_gvec_4_ptr *fn, bool (*checkfn)(DisasContext *, arg_rmrr *)) @@ -1522,6 +1886,14 @@ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a) vext_check_ds(s, a->rd, a->rs2, a->vm); } +static bool opivx_overwrite_widen_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + vext_check_isa_ill(s) && + vext_check_ds(s, a->rd, a->rs2, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm); +} + #define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ { \ @@ -1993,13 +2365,13 @@ GEN_OPIVX_TRANS(vmadd_vx, opivx_check) GEN_OPIVX_TRANS(vnmsub_vx, opivx_check) /* Vector Widening Integer Multiply-Add Instructions */ -GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check) -GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check) -GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check) -GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_overwrite_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_overwrite_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_overwrite_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_overwrite_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_overwrite_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_overwrite_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_overwrite_widen_check) /* Vector Integer Merge and Move Instructions */ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a) @@ -2340,6 +2712,17 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a) vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm); } +static bool opfvv_overwrite_widen_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + require_rvf(s) && + require_scale_rvf(s) && + vext_check_isa_ill(s) && + vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs1, s->sew, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm); +} + /* OPFVV with WIDEN */ #define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ @@ -2379,11 +2762,21 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a) vext_check_ds(s, a->rd, a->rs2, a->vm); } +static bool opfvf_overwrite_widen_check(DisasContext *s, arg_rmrr *a) +{ + return require_rvv(s) && + require_rvf(s) && + require_scale_rvf(s) && + vext_check_isa_ill(s) && + vext_check_ds(s, a->rd, a->rs2, a->vm) && + vext_check_input_eew(s, a->rd, s->sew + 1, a->rs2, s->sew, a->vm); +} + /* OPFVF with WIDEN */ -#define GEN_OPFVF_WIDEN_TRANS(NAME) \ +#define 
GEN_OPFVF_WIDEN_TRANS(NAME, CHECK) \ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ { \ - if (opfvf_widen_check(s, a)) { \ + if (CHECK(s, a)) { \ uint32_t data = 0; \ static gen_helper_opfvf *const fns[2] = { \ gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ @@ -2399,8 +2792,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ return false; \ } -GEN_OPFVF_WIDEN_TRANS(vfwadd_vf) -GEN_OPFVF_WIDEN_TRANS(vfwsub_vf) +GEN_OPFVF_WIDEN_TRANS(vfwadd_vf, opfvf_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwsub_vf, opfvf_widen_check) static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a) { @@ -2482,7 +2875,7 @@ GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check) /* Vector Widening Floating-Point Multiply */ GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check) -GEN_OPFVF_WIDEN_TRANS(vfwmul_vf) +GEN_OPFVF_WIDEN_TRANS(vfwmul_vf, opfvf_widen_check) /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */ GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check) @@ -2503,14 +2896,14 @@ GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check) GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check) /* Vector Widening Floating-Point Fused Multiply-Add Instructions */ -GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check) -GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check) -GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check) -GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check) -GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf) -GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf) -GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf) -GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf) +GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_overwrite_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_overwrite_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_overwrite_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_overwrite_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf, opfvf_overwrite_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf, opfvf_overwrite_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf, opfvf_overwrite_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf, opfvf_overwrite_widen_check) /* Vector Floating-Point Square-Root Instruction */ @@ -3426,6 +3819,7 @@ static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a) { return require_rvv(s) && vext_check_isa_ill(s) && + vext_check_input_eew(s, a->rs1, s->sew, a->rs2, s->sew, a->vm) && require_align(a->rd, s->lmul) && require_align(a->rs1, s->lmul) && require_align(a->rs2, s->lmul) && @@ -3438,6 +3832,7 @@ static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a) int8_t emul = MO_16 - s->sew + s->lmul; return require_rvv(s) && vext_check_isa_ill(s) && + vext_check_input_eew(s, a->rs1, MO_16, a->rs2, s->sew, a->vm) && (emul >= -3 && emul <= 3) && require_align(a->rd, s->lmul) && require_align(a->rs1, emul) && @@ -3457,6 +3852,7 @@ static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a) { return require_rvv(s) && vext_check_isa_ill(s) && + vext_check_input_eew(s, -1, MO_64, a->rs2, s->sew, a->vm) && require_align(a->rd, s->lmul) && require_align(a->rs2, s->lmul) && (a->rd != a->rs2) && @@ -3600,7 +3996,9 @@ static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div) require_align(a->rd, s->lmul) && require_align(a->rs2, s->lmul - div) && require_vm(a->vm, a->rd) && - require_noover(a->rd, s->lmul, a->rs2, s->lmul - div); + require_noover(a->rd, s->lmul, a->rs2, s->lmul - div) && + vext_check_input_eew(s, -1, 0, a->rs2, s->sew, a->vm); + return ret; } diff --git a/target/riscv/internals.h b/target/riscv/internals.h index 213aff3..172296f 100644 --- a/target/riscv/internals.h +++ b/target/riscv/internals.h @@ -142,6 +142,33 @@ static 
inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f) } } +static inline float16 check_nanbox_bf16(CPURISCVState *env, uint64_t f) +{ + /* Disable the NaN-box check when zfinx is enabled */ + if (env_archcpu(env)->cfg.ext_zfinx) { + return (uint16_t)f; + } + + uint64_t mask = MAKE_64BIT_MASK(16, 48); + + if (likely((f & mask) == mask)) { + return (uint16_t)f; + } else { + return 0x7FC0u; /* default qNaN */ + } +} + +static inline target_ulong get_xepc_mask(CPURISCVState *env) +{ + /* + * When IALIGN=32, both low bits must be zero. + * When IALIGN=16 (the C extension is present), only bit 0 must be zero. + */ + if (riscv_has_ext(env, RVC)) { + return ~(target_ulong)1; + } else { + return ~(target_ulong)3; + } +} + #ifndef CONFIG_USER_ONLY /* Our implementation of SysemuCPUOps::has_work */ bool riscv_cpu_has_work(CPUState *cs); @@ -201,4 +228,9 @@ static inline target_ulong adjust_addr_virt(CPURISCVState *env, return adjust_addr_body(env, addr, true); } +static inline int insn_len(uint16_t first_word) +{ + return (first_word & 3) == 3 ? 4 : 2; +} + #endif diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c index 0f4997a..5c19062 100644 --- a/target/riscv/kvm/kvm-cpu.c +++ b/target/riscv/kvm/kvm-cpu.c @@ -35,7 +35,7 @@ #include "accel/accel-cpu-target.h" #include "hw/pci/pci.h" #include "exec/memattrs.h" -#include "exec/address-spaces.h" +#include "system/address-spaces.h" #include "hw/boards.h" #include "hw/irq.h" #include "hw/intc/riscv_imsic.h" @@ -58,33 +58,17 @@ void riscv_kvm_aplic_request(void *opaque, int irq, int level) static bool cap_has_mp_state; -static uint64_t kvm_riscv_reg_id_ulong(CPURISCVState *env, uint64_t type, - uint64_t idx) -{ - uint64_t id = KVM_REG_RISCV | type | idx; - - switch (riscv_cpu_mxl(env)) { - case MXL_RV32: - id |= KVM_REG_SIZE_U32; - break; - case MXL_RV64: - id |= KVM_REG_SIZE_U64; - break; - default: - g_assert_not_reached(); - } - return id; -} +#define KVM_RISCV_REG_ID_U32(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U32 | \ + type | idx) -static uint64_t kvm_riscv_reg_id_u32(uint64_t type, uint64_t idx) -{ - return KVM_REG_RISCV | KVM_REG_SIZE_U32 | type | idx; -} +#define KVM_RISCV_REG_ID_U64(type, idx) (KVM_REG_RISCV | KVM_REG_SIZE_U64 | \ + type | idx) -static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx) -{ - return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx; -} +#if defined(TARGET_RISCV64) +#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U64(type, idx) +#else +#define KVM_RISCV_REG_ID_ULONG(type, idx) KVM_RISCV_REG_ID_U32(type, idx) +#endif static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b) { @@ -107,45 +91,29 @@ static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu, return kvm_encode_reg_size_id(id, size_b); } -#define RISCV_CORE_REG(env, name) \ - kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \ +#define RISCV_CORE_REG(name) \ + KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, \ KVM_REG_RISCV_CORE_REG(name)) -#define RISCV_CSR_REG(env, name) \ - kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CSR, \ +#define RISCV_CSR_REG(name) \ + KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CSR, \ KVM_REG_RISCV_CSR_REG(name)) -#define RISCV_CONFIG_REG(env, name) \ - kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \ +#define RISCV_CONFIG_REG(name) \ + KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, \ KVM_REG_RISCV_CONFIG_REG(name)) -#define RISCV_TIMER_REG(name) kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER, \ +#define RISCV_TIMER_REG(name) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_TIMER, \ KVM_REG_RISCV_TIMER_REG(name)) -#define RISCV_FP_F_REG(idx) 
kvm_riscv_reg_id_u32(KVM_REG_RISCV_FP_F, idx) +#define RISCV_FP_F_REG(idx) KVM_RISCV_REG_ID_U32(KVM_REG_RISCV_FP_F, idx) -#define RISCV_FP_D_REG(idx) kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx) +#define RISCV_FP_D_REG(idx) KVM_RISCV_REG_ID_U64(KVM_REG_RISCV_FP_D, idx) -#define RISCV_VECTOR_CSR_REG(env, name) \ - kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_VECTOR, \ +#define RISCV_VECTOR_CSR_REG(name) \ + KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_VECTOR, \ KVM_REG_RISCV_VECTOR_CSR_REG(name)) -#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \ - do { \ - int _ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), ®); \ - if (_ret) { \ - return _ret; \ - } \ - } while (0) - -#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \ - do { \ - int _ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), ®); \ - if (_ret) { \ - return _ret; \ - } \ - } while (0) - #define KVM_RISCV_GET_TIMER(cs, name, reg) \ do { \ int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), ®); \ @@ -167,6 +135,7 @@ typedef struct KVMCPUConfig { const char *description; target_ulong offset; uint64_t kvm_reg_id; + uint32_t prop_size; bool user_set; bool supported; } KVMCPUConfig; @@ -248,7 +217,7 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs) /* If we're here we're going to disable the MISA bit */ reg = 0; - id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT, misa_cfg->kvm_reg_id); ret = kvm_set_one_reg(cs, id, ®); if (ret != 0) { @@ -267,6 +236,56 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs) } } +#define KVM_CSR_CFG(_name, _env_prop, reg_id) \ + {.name = _name, .offset = ENV_CSR_OFFSET(_env_prop), \ + .prop_size = sizeof(((CPURISCVState *)0)->_env_prop), \ + .kvm_reg_id = reg_id} + +static KVMCPUConfig kvm_csr_cfgs[] = { + KVM_CSR_CFG("sstatus", mstatus, RISCV_CSR_REG(sstatus)), + KVM_CSR_CFG("sie", mie, RISCV_CSR_REG(sie)), + KVM_CSR_CFG("stvec", stvec, RISCV_CSR_REG(stvec)), + KVM_CSR_CFG("sscratch", sscratch, RISCV_CSR_REG(sscratch)), + KVM_CSR_CFG("sepc", sepc, RISCV_CSR_REG(sepc)), + KVM_CSR_CFG("scause", scause, RISCV_CSR_REG(scause)), + KVM_CSR_CFG("stval", stval, RISCV_CSR_REG(stval)), + KVM_CSR_CFG("sip", mip, RISCV_CSR_REG(sip)), + KVM_CSR_CFG("satp", satp, RISCV_CSR_REG(satp)), + KVM_CSR_CFG("scounteren", scounteren, RISCV_CSR_REG(scounteren)), + KVM_CSR_CFG("senvcfg", senvcfg, RISCV_CSR_REG(senvcfg)), +}; + +static void *kvmconfig_get_env_addr(RISCVCPU *cpu, KVMCPUConfig *csr_cfg) +{ + return (void *)&cpu->env + csr_cfg->offset; +} + +static uint32_t kvm_cpu_csr_get_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg) +{ + uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg); + return *val32; +} + +static uint64_t kvm_cpu_csr_get_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg) +{ + uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg); + return *val64; +} + +static void kvm_cpu_csr_set_u32(RISCVCPU *cpu, KVMCPUConfig *csr_cfg, + uint32_t val) +{ + uint32_t *val32 = kvmconfig_get_env_addr(cpu, csr_cfg); + *val32 = val; +} + +static void kvm_cpu_csr_set_u64(RISCVCPU *cpu, KVMCPUConfig *csr_cfg, + uint64_t val) +{ + uint64_t *val64 = kvmconfig_get_env_addr(cpu, csr_cfg); + *val64 = val; +} + #define KVM_EXT_CFG(_name, _prop, _reg_id) \ {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \ .kvm_reg_id = _reg_id} @@ -434,7 +453,6 @@ static KVMCPUConfig kvm_sbi_dbcn = { static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs) { - CPURISCVState *env = &cpu->env; uint64_t id, reg; int i, ret; @@ -445,7 +463,7 @@ static void 
kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs) continue; } - id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT, multi_ext_cfg->kvm_reg_id); reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg); ret = kvm_set_one_reg(cs, id, ®); @@ -570,14 +588,14 @@ static int kvm_riscv_get_regs_core(CPUState *cs) target_ulong reg; CPURISCVState *env = &RISCV_CPU(cs)->env; - ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), ®); + ret = kvm_get_one_reg(cs, RISCV_CORE_REG(regs.pc), ®); if (ret) { return ret; } env->pc = reg; for (i = 1; i < 32; i++) { - uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i); + uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i); ret = kvm_get_one_reg(cs, id, ®); if (ret) { return ret; @@ -596,13 +614,13 @@ static int kvm_riscv_put_regs_core(CPUState *cs) CPURISCVState *env = &RISCV_CPU(cs)->env; reg = env->pc; - ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), ®); + ret = kvm_set_one_reg(cs, RISCV_CORE_REG(regs.pc), ®); if (ret) { return ret; } for (i = 1; i < 32; i++) { - uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i); + uint64_t id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CORE, i); reg = env->gpr[i]; ret = kvm_set_one_reg(cs, id, ®); if (ret) { @@ -613,53 +631,81 @@ static int kvm_riscv_put_regs_core(CPUState *cs) return ret; } -static void kvm_riscv_reset_regs_csr(CPURISCVState *env) -{ - env->mstatus = 0; - env->mie = 0; - env->stvec = 0; - env->sscratch = 0; - env->sepc = 0; - env->scause = 0; - env->stval = 0; - env->mip = 0; - env->satp = 0; -} - static int kvm_riscv_get_regs_csr(CPUState *cs) { - CPURISCVState *env = &RISCV_CPU(cs)->env; + RISCVCPU *cpu = RISCV_CPU(cs); + uint64_t reg; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) { + KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i]; + + if (!csr_cfg->supported) { + continue; + } + + ret = kvm_get_one_reg(cs, csr_cfg->kvm_reg_id, ®); + if (ret) { + return ret; + } - KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus); - KVM_RISCV_GET_CSR(cs, env, sie, env->mie); - KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec); - KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch); - KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc); - KVM_RISCV_GET_CSR(cs, env, scause, env->scause); - KVM_RISCV_GET_CSR(cs, env, stval, env->stval); - KVM_RISCV_GET_CSR(cs, env, sip, env->mip); - KVM_RISCV_GET_CSR(cs, env, satp, env->satp); + if (csr_cfg->prop_size == sizeof(uint32_t)) { + kvm_cpu_csr_set_u32(cpu, csr_cfg, (uint32_t)reg); + } else if (csr_cfg->prop_size == sizeof(uint64_t)) { + kvm_cpu_csr_set_u64(cpu, csr_cfg, reg); + } else { + g_assert_not_reached(); + } + } return 0; } static int kvm_riscv_put_regs_csr(CPUState *cs) { - CPURISCVState *env = &RISCV_CPU(cs)->env; + RISCVCPU *cpu = RISCV_CPU(cs); + uint64_t reg; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) { + KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i]; + + if (!csr_cfg->supported) { + continue; + } + + if (csr_cfg->prop_size == sizeof(uint32_t)) { + reg = kvm_cpu_csr_get_u32(cpu, csr_cfg); + } else if (csr_cfg->prop_size == sizeof(uint64_t)) { + reg = kvm_cpu_csr_get_u64(cpu, csr_cfg); + } else { + g_assert_not_reached(); + } - KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus); - KVM_RISCV_SET_CSR(cs, env, sie, env->mie); - KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec); - KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch); - KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc); - KVM_RISCV_SET_CSR(cs, env, scause, env->scause); - KVM_RISCV_SET_CSR(cs, env, stval, 
env->stval); - KVM_RISCV_SET_CSR(cs, env, sip, env->mip); - KVM_RISCV_SET_CSR(cs, env, satp, env->satp); + ret = kvm_set_one_reg(cs, csr_cfg->kvm_reg_id, ®); + if (ret) { + return ret; + } + } return 0; } +static void kvm_riscv_reset_regs_csr(CPURISCVState *env) +{ + env->mstatus = 0; + env->mie = 0; + env->stvec = 0; + env->sscratch = 0; + env->sepc = 0; + env->scause = 0; + env->stval = 0; + env->mip = 0; + env->satp = 0; + env->scounteren = 0; + env->senvcfg = 0; +} + static int kvm_riscv_get_regs_fp(CPUState *cs) { int ret = 0; @@ -800,26 +846,26 @@ static int kvm_riscv_get_regs_vector(CPUState *cs) return 0; } - ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), ®); + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), ®); if (ret) { return ret; } env->vstart = reg; - ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), ®); + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), ®); if (ret) { return ret; } env->vl = reg; - ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), ®); + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), ®); if (ret) { return ret; } env->vtype = reg; if (kvm_v_vlenb.supported) { - ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), ®); + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), ®); if (ret) { return ret; } @@ -857,26 +903,26 @@ static int kvm_riscv_put_regs_vector(CPUState *cs) } reg = env->vstart; - ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), ®); + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vstart), ®); if (ret) { return ret; } reg = env->vl; - ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), ®); + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vl), ®); if (ret) { return ret; } reg = env->vtype; - ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), ®); + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vtype), ®); if (ret) { return ret; } if (kvm_v_vlenb.supported) { reg = cpu->cfg.vlenb; - ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), ®); + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(vlenb), ®); for (int i = 0; i < 32; i++) { /* @@ -953,27 +999,39 @@ static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch) close(scratch->kvmfd); } +static void kvm_riscv_init_max_satp_mode(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) +{ + struct kvm_one_reg reg; + int ret; + + reg.id = RISCV_CONFIG_REG(satp_mode); + reg.addr = (uint64_t)&cpu->cfg.max_satp_mode; + ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); + if (ret != 0) { + error_report("Unable to retrieve satp mode from host, error %d", ret); + } +} + static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) { - CPURISCVState *env = &cpu->env; struct kvm_one_reg reg; int ret; - reg.id = RISCV_CONFIG_REG(env, mvendorid); + reg.id = RISCV_CONFIG_REG(mvendorid); reg.addr = (uint64_t)&cpu->cfg.mvendorid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { error_report("Unable to retrieve mvendorid from host, error %d", ret); } - reg.id = RISCV_CONFIG_REG(env, marchid); + reg.id = RISCV_CONFIG_REG(marchid); reg.addr = (uint64_t)&cpu->cfg.marchid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { error_report("Unable to retrieve marchid from host, error %d", ret); } - reg.id = RISCV_CONFIG_REG(env, mimpid); + reg.id = RISCV_CONFIG_REG(mimpid); reg.addr = (uint64_t)&cpu->cfg.mimpid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { @@ -988,7 +1046,7 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu, struct kvm_one_reg reg; int ret; - reg.id = 
RISCV_CONFIG_REG(env, isa); + reg.id = RISCV_CONFIG_REG(isa); reg.addr = (uint64_t)&env->misa_ext_mask; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); @@ -1005,11 +1063,10 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu, static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, KVMCPUConfig *cbomz_cfg) { - CPURISCVState *env = &cpu->env; struct kvm_one_reg reg; int ret; - reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, cbomz_cfg->kvm_reg_id); reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg); ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); @@ -1023,7 +1080,6 @@ static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) { - CPURISCVState *env = &cpu->env; uint64_t val; int i, ret; @@ -1031,7 +1087,7 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i]; struct kvm_one_reg reg; - reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT, multi_ext_cfg->kvm_reg_id); reg.addr = (uint64_t)&val; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); @@ -1061,6 +1117,32 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, } } +static void kvm_riscv_read_csr_cfg_legacy(KVMScratchCPU *kvmcpu) +{ + uint64_t val; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) { + KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i]; + struct kvm_one_reg reg; + + reg.id = csr_cfg->kvm_reg_id; + reg.addr = (uint64_t)&val; + ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); + if (ret != 0) { + if (errno == EINVAL) { + csr_cfg->supported = false; + } else { + error_report("Unable to read KVM CSR %s: %s", + csr_cfg->name, strerror(errno)); + exit(EXIT_FAILURE); + } + } else { + csr_cfg->supported = true; + } + } +} + static int uint64_cmp(const void *a, const void *b) { uint64_t val1 = *(const uint64_t *)a; @@ -1078,7 +1160,6 @@ static int uint64_cmp(const void *a, const void *b) } static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu, - KVMScratchCPU *kvmcpu, struct kvm_reg_list *reglist) { struct kvm_reg_list *reg_search; @@ -1118,12 +1199,31 @@ static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, } } -static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) +static void kvm_riscv_read_csr_cfg(struct kvm_reg_list *reglist) +{ + struct kvm_reg_list *reg_search; + uint64_t reg_id; + + for (int i = 0; i < ARRAY_SIZE(kvm_csr_cfgs); i++) { + KVMCPUConfig *csr_cfg = &kvm_csr_cfgs[i]; + + reg_id = csr_cfg->kvm_reg_id; + reg_search = bsearch(®_id, reglist->reg, reglist->n, + sizeof(uint64_t), uint64_cmp); + if (!reg_search) { + continue; + } + + csr_cfg->supported = true; + } +} + +static void kvm_riscv_init_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) { + g_autofree struct kvm_reg_list *reglist = NULL; KVMCPUConfig *multi_ext_cfg; struct kvm_one_reg reg; struct kvm_reg_list rl_struct; - struct kvm_reg_list *reglist; uint64_t val, reg_id, *reg_search; int i, ret; @@ -1135,7 +1235,9 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) * (EINVAL). Use read_legacy() in this case. 
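The legacy path probes the extensions and CSRs one register at a time. 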
*/ if (errno == EINVAL) { - return kvm_riscv_read_multiext_legacy(cpu, kvmcpu); + kvm_riscv_read_multiext_legacy(cpu, kvmcpu); + kvm_riscv_read_csr_cfg_legacy(kvmcpu); + return; } else if (errno != E2BIG) { /* * E2BIG is an expected error message for the API since we @@ -1164,7 +1266,7 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) { multi_ext_cfg = &kvm_multi_ext_cfgs[i]; - reg_id = kvm_riscv_reg_id_ulong(&cpu->env, KVM_REG_RISCV_ISA_EXT, + reg_id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_ISA_EXT, multi_ext_cfg->kvm_reg_id); reg_search = bsearch(®_id, reglist->reg, reglist->n, sizeof(uint64_t), uint64_cmp); @@ -1197,7 +1299,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) kvm_riscv_read_vlenb(cpu, kvmcpu, reglist); } - kvm_riscv_check_sbi_dbcn_support(cpu, kvmcpu, reglist); + kvm_riscv_check_sbi_dbcn_support(cpu, reglist); + kvm_riscv_read_csr_cfg(reglist); } static void riscv_init_kvm_registers(Object *cpu_obj) @@ -1211,7 +1314,8 @@ static void riscv_init_kvm_registers(Object *cpu_obj) kvm_riscv_init_machine_ids(cpu, &kvmcpu); kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu); - kvm_riscv_init_multiext_cfg(cpu, &kvmcpu); + kvm_riscv_init_cfg(cpu, &kvmcpu); + kvm_riscv_init_max_satp_mode(cpu, &kvmcpu); kvm_riscv_destroy_scratch_vcpu(&kvmcpu); } @@ -1343,12 +1447,11 @@ void kvm_arch_init_irq_routing(KVMState *s) static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs) { - CPURISCVState *env = &cpu->env; target_ulong reg; uint64_t id; int ret; - id = RISCV_CONFIG_REG(env, mvendorid); + id = RISCV_CONFIG_REG(mvendorid); /* * cfg.mvendorid is an uint32 but a target_ulong will * be written. Assign it to a target_ulong var to avoid @@ -1360,13 +1463,13 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs) return ret; } - id = RISCV_CONFIG_REG(env, marchid); + id = RISCV_CONFIG_REG(marchid); ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid); if (ret != 0) { return ret; } - id = RISCV_CONFIG_REG(env, mimpid); + id = RISCV_CONFIG_REG(mimpid); ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid); return ret; @@ -1383,6 +1486,11 @@ static int kvm_vcpu_enable_sbi_dbcn(RISCVCPU *cpu, CPUState *cs) return kvm_set_one_reg(cs, kvm_sbi_dbcn.kvm_reg_id, ®); } +int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp) +{ + return 0; +} + int kvm_arch_init_vcpu(CPUState *cs) { int ret = 0; @@ -1511,7 +1619,7 @@ static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run) break; case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE: ch = run->riscv_sbi.args[0]; - ret = qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch)); + ret = qemu_chr_fe_write_all(serial_hd(0)->be, &ch, sizeof(ch)); if (ret < 0) { error_report("SBI_EXT_DBCN_CONSOLE_WRITE_BYTE: error when " @@ -1891,7 +1999,7 @@ static bool kvm_cpu_realize(CPUState *cs, Error **errp) } } - return true; + return true; } void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp) @@ -1916,7 +2024,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp) if (cpu->cfg.ext_zicbom && riscv_cpu_option_set(kvm_cbom_blocksize.name)) { - reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + reg.id = KVM_RISCV_REG_ID_ULONG(KVM_REG_RISCV_CONFIG, kvm_cbom_blocksize.kvm_reg_id); reg.addr = (uint64_t)&val; ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, ®); @@ -1935,7 +2043,7 @@ void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp) if (cpu->cfg.ext_zicboz && riscv_cpu_option_set(kvm_cboz_blocksize.name)) { - reg.id = 
diff --git a/target/riscv/m128_helper.c b/target/riscv/m128_helper.c index ec14aaa..7d9b83b 100644 --- a/target/riscv/m128_helper.c +++ b/target/riscv/m128_helper.c @@ -19,7 +19,6 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" target_ulong HELPER(divu_i128)(CPURISCVState *env, diff --git a/target/riscv/machine.c b/target/riscv/machine.c index 889e2b6..1600ec4 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -21,7 +21,7 @@ #include "qemu/error-report.h" #include "system/kvm.h" #include "migration/cpu.h" -#include "system/cpu-timers.h" +#include "exec/icount.h" #include "debug.h" static bool pmp_needed(void *opaque) @@ -36,8 +36,9 @@ static int pmp_post_load(void *opaque, int version_id) RISCVCPU *cpu = opaque; CPURISCVState *env = &cpu->env; int i; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; - for (i = 0; i < MAX_RISCV_PMPS; i++) { + for (i = 0; i < pmp_regions; i++) { pmp_update_rule_addr(env, i); } pmp_update_rule_nums(env); @@ -170,7 +171,7 @@ static bool rv128_needed(void *opaque) { RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(opaque); - return mcc->misa_mxl_max == MXL_RV128; + return mcc->def->misa_mxl_max == MXL_RV128; } static const VMStateDescription vmstate_rv128 = { diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index 72dc48e..15460bf 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -21,10 +21,11 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internals.h" -#include "exec/exec-all.h" #include "exec/cputlb.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" #include "exec/helper-proto.h" +#include "exec/tlb-flags.h" #include "trace.h" /* Exceptions processing helpers */ @@ -70,7 +71,7 @@ target_ulong helper_csrr(CPURISCVState *env, int csr) void helper_csrw(CPURISCVState *env, int csr, target_ulong src) { target_ulong mask = env->xl == MXL_RV32 ? 
UINT32_MAX : (target_ulong)-1; - RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask); + RISCVException ret = riscv_csrrw(env, csr, NULL, src, mask, GETPC()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -81,7 +82,7 @@ target_ulong helper_csrrw(CPURISCVState *env, int csr, target_ulong src, target_ulong write_mask) { target_ulong val = 0; - RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask); + RISCVException ret = riscv_csrrw(env, csr, &val, src, write_mask, GETPC()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -107,7 +108,7 @@ void helper_csrw_i128(CPURISCVState *env, int csr, { RISCVException ret = riscv_csrrw_i128(env, csr, NULL, int128_make128(srcl, srch), - UINT128_MAX); + UINT128_MAX, GETPC()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -115,13 +116,14 @@ void helper_csrw_i128(CPURISCVState *env, int csr, } target_ulong helper_csrrw_i128(CPURISCVState *env, int csr, - target_ulong srcl, target_ulong srch, - target_ulong maskl, target_ulong maskh) + target_ulong srcl, target_ulong srch, + target_ulong maskl, target_ulong maskh) { Int128 rv = int128_zero(); RISCVException ret = riscv_csrrw_i128(env, csr, &rv, int128_make128(srcl, srch), - int128_make128(maskl, maskh)); + int128_make128(maskl, maskh), + GETPC()); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -278,7 +280,7 @@ target_ulong helper_sret(CPURISCVState *env) riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } - target_ulong retpc = env->sepc; + target_ulong retpc = env->sepc & get_xepc_mask(env); if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg, env->priv_ver, env->misa_ext) && (retpc & 0x3)) { @@ -389,7 +391,7 @@ static target_ulong ssdbltrp_mxret(CPURISCVState *env, target_ulong mstatus, target_ulong helper_mret(CPURISCVState *env) { - target_ulong retpc = env->mepc; + target_ulong retpc = env->mepc & get_xepc_mask(env); uint64_t mstatus = env->mstatus; target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP); diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c index b0841d4..3540327 100644 --- a/target/riscv/pmp.c +++ b/target/riscv/pmp.c @@ -26,12 +26,22 @@ #include "trace.h" #include "exec/cputlb.h" #include "exec/page-protection.h" +#include "exec/target_page.h" static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index, uint8_t val); static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index); /* + * Convert the PMP permissions to match the truth table in the Smepmp spec. + */ +static inline uint8_t pmp_get_smepmp_operation(uint8_t cfg) +{ + return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) | + (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2); +} + +/* * Accessor method to extract address matching type 'a field' from cfg reg */ static inline uint8_t pmp_get_a_field(uint8_t cfg) @@ -45,21 +55,58 @@ static inline uint8_t pmp_get_a_field(uint8_t cfg) */ static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index) { - /* mseccfg.RLB is set */ - if (MSECCFG_RLB_ISSET(env)) { - return 0; - } - if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) { return 1; } - /* Top PMP has no 'next' to check */ - if ((pmp_index + 1u) >= MAX_RISCV_PMPS) { + return 0; +} + +/* + * Check whether a PMP is locked for writing or not. + * (i.e. 
has LOCK flag and mseccfg.RLB is unset) + */ +static int pmp_is_readonly(CPURISCVState *env, uint32_t pmp_index) +{ + return pmp_is_locked(env, pmp_index) && !MSECCFG_RLB_ISSET(env); +} + +/* + * Check whether `val` is an invalid Smepmp config value + */ +static int pmp_is_invalid_smepmp_cfg(CPURISCVState *env, uint8_t val) +{ + /* No check if mseccfg.MML is not set or if mseccfg.RLB is set */ + if (!MSECCFG_MML_ISSET(env) || MSECCFG_RLB_ISSET(env)) { return 0; } - return 0; + /* + * Adding a rule with executable privileges that either is M-mode-only + * or a locked Shared-Region is not possible + */ + switch (pmp_get_smepmp_operation(val)) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 12: + case 14: + case 15: + return 0; + case 9: + case 10: + case 11: + case 13: + return 1; + default: + g_assert_not_reached(); + } } /* @@ -75,7 +122,9 @@ uint32_t pmp_get_num_rules(CPURISCVState *env) */ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index) { - if (pmp_index < MAX_RISCV_PMPS) { + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; + + if (pmp_index < pmp_regions) { return env->pmp_state.pmp[pmp_index].cfg_reg; } @@ -89,46 +138,21 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index) */ static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val) { - if (pmp_index < MAX_RISCV_PMPS) { - bool locked = true; - - if (riscv_cpu_cfg(env)->ext_smepmp) { - /* mseccfg.RLB is set */ - if (MSECCFG_RLB_ISSET(env)) { - locked = false; - } - - /* mseccfg.MML is not set */ - if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) { - locked = false; - } + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; - /* mseccfg.MML is set */ - if (MSECCFG_MML_ISSET(env)) { - /* not adding execute bit */ - if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) { - locked = false; - } - /* shared region and not adding X bit */ - if ((val & PMP_LOCK) != PMP_LOCK && - (val & 0x7) != (PMP_WRITE | PMP_EXEC)) { - locked = false; - } - } - } else { - if (!pmp_is_locked(env, pmp_index)) { - locked = false; - } + if (pmp_index < pmp_regions) { + if (env->pmp_state.pmp[pmp_index].cfg_reg == val) { + /* no change */ + return false; } - if (locked) { - qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n"); - } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) { - /* If !mseccfg.MML then ignore writes with encoding RW=01 */ - if ((val & PMP_WRITE) && !(val & PMP_READ) && - !MSECCFG_MML_ISSET(env)) { - return false; - } + if (pmp_is_readonly(env, pmp_index)) { + qemu_log_mask(LOG_GUEST_ERROR, + "ignoring pmpcfg write - read only\n"); + } else if (pmp_is_invalid_smepmp_cfg(env, val)) { + qemu_log_mask(LOG_GUEST_ERROR, + "ignoring pmpcfg write - invalid\n"); + } else { env->pmp_state.pmp[pmp_index].cfg_reg = val; pmp_update_rule_addr(env, pmp_index); return true; @@ -216,9 +240,10 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index) void pmp_update_rule_nums(CPURISCVState *env) { int i; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; env->pmp_state.num_rules = 0; - for (i = 0; i < MAX_RISCV_PMPS; i++) { + for (i = 0; i < pmp_regions; i++) { const uint8_t a_field = pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); if (PMP_AMATCH_OFF != a_field) { @@ -312,6 +337,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, int pmp_size = 0; hwaddr s = 0; hwaddr e = 0; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; /* Short cut if no rules */ if (0 == 
pmp_get_num_rules(env)) { @@ -336,7 +362,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, * 1.10 draft priv spec states there is an implicit order * from low to high */ - for (i = 0; i < MAX_RISCV_PMPS; i++) { + for (i = 0; i < pmp_regions; i++) { s = pmp_is_in_range(env, i, addr); e = pmp_is_in_range(env, i, addr + pmp_size - 1); @@ -352,16 +378,6 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, const uint8_t a_field = pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); - /* - * Convert the PMP permissions to match the truth table in the - * Smepmp spec. - */ - const uint8_t smepmp_operation = - ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) | - ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) | - (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) | - ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2); - if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) { /* * If the PMP entry is not off and the address is in range, @@ -380,6 +396,9 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, /* * If mseccfg.MML Bit set, do the enhanced pmp priv check */ + const uint8_t smepmp_operation = + pmp_get_smepmp_operation(env->pmp_state.pmp[i].cfg_reg); + if (mode == PRV_M) { switch (smepmp_operation) { case 0: @@ -514,35 +533,39 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, { trace_pmpaddr_csr_write(env->mhartid, addr_index, val); bool is_next_cfg_tor = false; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; + + if (addr_index < pmp_regions) { + if (env->pmp_state.pmp[addr_index].addr_reg == val) { + /* no change */ + return; + } - if (addr_index < MAX_RISCV_PMPS) { /* * In TOR mode, need to check the lock bit of the next pmp * (if there is a next). */ - if (addr_index + 1 < MAX_RISCV_PMPS) { + if (addr_index + 1 < pmp_regions) { uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg; is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg); - if (pmp_is_locked(env, addr_index + 1) && is_next_cfg_tor) { + if (pmp_is_readonly(env, addr_index + 1) && is_next_cfg_tor) { qemu_log_mask(LOG_GUEST_ERROR, - "ignoring pmpaddr write - pmpcfg + 1 locked\n"); + "ignoring pmpaddr write - pmpcfg+1 read only\n"); return; } } - if (!pmp_is_locked(env, addr_index)) { - if (env->pmp_state.pmp[addr_index].addr_reg != val) { - env->pmp_state.pmp[addr_index].addr_reg = val; - pmp_update_rule_addr(env, addr_index); - if (is_next_cfg_tor) { - pmp_update_rule_addr(env, addr_index + 1); - } - tlb_flush(env_cpu(env)); + if (!pmp_is_readonly(env, addr_index)) { + env->pmp_state.pmp[addr_index].addr_reg = val; + pmp_update_rule_addr(env, addr_index); + if (is_next_cfg_tor) { + pmp_update_rule_addr(env, addr_index + 1); } + tlb_flush(env_cpu(env)); } else { qemu_log_mask(LOG_GUEST_ERROR, - "ignoring pmpaddr write - locked\n"); + "ignoring pmpaddr write - read only\n"); } } else { qemu_log_mask(LOG_GUEST_ERROR, @@ -557,8 +580,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index) { target_ulong val = 0; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; - if (addr_index < MAX_RISCV_PMPS) { + if (addr_index < pmp_regions) { val = env->pmp_state.pmp[addr_index].addr_reg; trace_pmpaddr_csr_read(env->mhartid, addr_index, val); } else { @@ -576,6 +600,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val) { int i; uint64_t mask = MSECCFG_MMWP | MSECCFG_MML; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; /* Update PMM field only if the value is valid according to 
Zjpm v1.0 */ if (riscv_cpu_cfg(env)->ext_smmpm && riscv_cpu_mxl(env) == MXL_RV64 && @@ -587,7 +612,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val) /* RLB cannot be enabled if it's already 0 and if any regions are locked */ if (!MSECCFG_RLB_ISSET(env)) { - for (i = 0; i < MAX_RISCV_PMPS; i++) { + for (i = 0; i < pmp_regions; i++) { if (pmp_is_locked(env, i)) { val &= ~MSECCFG_RLB; break; @@ -643,6 +668,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr) hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1); hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1; int i; + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions; /* * If PMP is not supported or there are no PMP rules, the TLB page will not @@ -653,7 +679,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr) return TARGET_PAGE_SIZE; } - for (i = 0; i < MAX_RISCV_PMPS; i++) { + for (i = 0; i < pmp_regions; i++) { if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) { continue; } diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c index 0408f96..a68809e 100644 --- a/target/riscv/pmu.c +++ b/target/riscv/pmu.c @@ -22,7 +22,7 @@ #include "qemu/timer.h" #include "cpu.h" #include "pmu.h" -#include "system/cpu-timers.h" +#include "exec/icount.h" #include "system/device_tree.h" #define RISCV_TIMEBASE_FREQ 1000000000 /* 1Ghz */ diff --git a/target/riscv/riscv-qmp-cmds.c b/target/riscv/riscv-qmp-cmds.c index d0a3243..8a1856c 100644 --- a/target/riscv/riscv-qmp-cmds.c +++ b/target/riscv/riscv-qmp-cmds.c @@ -25,7 +25,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qapi-commands-machine-target.h" +#include "qapi/qapi-commands-machine.h" #include "qobject/qbool.h" #include "qobject/qdict.h" #include "qapi/qobject-input-visitor.h" @@ -121,7 +121,7 @@ static void riscv_obj_add_profiles_qdict(Object *obj, QDict *qdict_out) for (int i = 0; riscv_profiles[i] != NULL; i++) { profile = riscv_profiles[i]; - value = QOBJECT(qbool_from_bool(profile->enabled)); + value = QOBJECT(qbool_from_bool(profile->present)); qdict_put_obj(qdict_out, profile->name, value); } diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c index 5aef9ee..78fb279 100644 --- a/target/riscv/tcg/tcg-cpu.c +++ b/target/riscv/tcg/tcg-cpu.c @@ -18,10 +18,10 @@ */ #include "qemu/osdep.h" -#include "exec/exec-all.h" #include "exec/translation-block.h" #include "tcg-cpu.h" #include "cpu.h" +#include "exec/target_page.h" #include "internals.h" #include "pmu.h" #include "time_helper.h" @@ -35,6 +35,8 @@ #include "tcg/tcg.h" #ifndef CONFIG_USER_ONLY #include "hw/boards.h" +#include "system/tcg.h" +#include "exec/icount.h" #endif /* Hash that stores user set extensions */ @@ -91,6 +93,108 @@ static const char *cpu_priv_ver_to_str(int priv_ver) return priv_spec_str; } +static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + return riscv_env_mmu_index(cpu_env(cs), ifetch); +} + +static TCGTBCPUState riscv_get_tb_cpu_state(CPUState *cs) +{ + CPURISCVState *env = cpu_env(cs); + RISCVCPU *cpu = env_archcpu(env); + RISCVExtStatus fs, vs; + uint32_t flags = 0; + bool pm_signext = riscv_cpu_virt_mem_enabled(env); + + if (cpu->cfg.ext_zve32x) { + /* + * If env->vl equals VLMAX, we can use generic vector operation + * expanders (GVEC) to accelerate the vector operations. + * However, as LMUL could be a fractional number, the maximum + * vector size that can be operated on might be less than 8 bytes, + * which is not supported by GVEC. So we set the vl_eq_vlmax flag + * to true only when maxsz >= 8 bytes. 
+ */ + + /* lmul encoded as in DisasContext::lmul */ + int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3); + uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW); + uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul); + uint32_t maxsz = vlmax << vsew; + bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) && + (maxsz >= 8); + flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill); + flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew); + flags = FIELD_DP32(flags, TB_FLAGS, LMUL, + FIELD_EX64(env->vtype, VTYPE, VLMUL)); + flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); + flags = FIELD_DP32(flags, TB_FLAGS, VTA, + FIELD_EX64(env->vtype, VTYPE, VTA)); + flags = FIELD_DP32(flags, TB_FLAGS, VMA, + FIELD_EX64(env->vtype, VTYPE, VMA)); + flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0); + } else { + flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); + } + + if (cpu_get_fcfien(env)) { + /* + * For Forward CFI, only the expectation of a lpad at + * the start of the block is tracked via env->elp. env->elp + * is turned on during jalr translation. + */ + flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED, env->elp); + flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1); + } + + if (cpu_get_bcfien(env)) { + flags = FIELD_DP32(flags, TB_FLAGS, BCFI_ENABLED, 1); + } + +#ifdef CONFIG_USER_ONLY + fs = EXT_STATUS_DIRTY; + vs = EXT_STATUS_DIRTY; +#else + flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv); + + flags |= riscv_env_mmu_index(env, 0); + fs = get_field(env->mstatus, MSTATUS_FS); + vs = get_field(env->mstatus, MSTATUS_VS); + + if (env->virt_enabled) { + flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1); + /* + * Merge DISABLED and !DIRTY states using MIN. + * We will set both fields when dirtying. + */ + fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS)); + vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS)); + } + + /* With Zfinx, floating point is enabled/disabled by Smstateen. */ + if (!riscv_has_ext(env, RVF)) { + fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE) + ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED; + } + + if (cpu->cfg.debug && !icount_enabled()) { + flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled); + } +#endif + + flags = FIELD_DP32(flags, TB_FLAGS, FS, fs); + flags = FIELD_DP32(flags, TB_FLAGS, VS, vs); + flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl); + flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env)); + flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env)); + flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext); + + return (TCGTBCPUState){ + .pc = env->xl == MXL_RV32 ? 
env->pc & UINT32_MAX : env->pc, + .flags = flags + }; +} + static void riscv_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -133,16 +237,48 @@ static void riscv_restore_state_to_opc(CPUState *cs, env->excp_uw2 = data[2]; } -static const TCGCPUOps riscv_tcg_ops = { +#ifndef CONFIG_USER_ONLY +static vaddr riscv_pointer_wrap(CPUState *cs, int mmu_idx, + vaddr result, vaddr base) +{ + CPURISCVState *env = cpu_env(cs); + uint32_t pm_len; + bool pm_signext; + + if (cpu_address_xl(env) == MXL_RV32) { + return (uint32_t)result; + } + + pm_len = riscv_pm_get_pmlen(riscv_pm_get_pmm(env)); + if (pm_len == 0) { + return result; + } + + pm_signext = riscv_cpu_virt_mem_enabled(env); + if (pm_signext) { + return sextract64(result, 0, 64 - pm_len); + } + return extract64(result, 0, 64 - pm_len); +} +#endif + +const TCGCPUOps riscv_tcg_ops = { + .mttcg_supported = true, + .guest_default_memory_order = 0, + .initialize = riscv_translate_init, .translate_code = riscv_translate_code, + .get_tb_cpu_state = riscv_get_tb_cpu_state, .synchronize_from_tb = riscv_cpu_synchronize_from_tb, .restore_state_to_opc = riscv_restore_state_to_opc, + .mmu_index = riscv_cpu_mmu_index, #ifndef CONFIG_USER_ONLY .tlb_fill = riscv_cpu_tlb_fill, + .pointer_wrap = riscv_pointer_wrap, .cpu_exec_interrupt = riscv_cpu_exec_interrupt, .cpu_exec_halt = riscv_cpu_has_work, + .cpu_exec_reset = cpu_reset, .do_interrupt = riscv_cpu_do_interrupt, .do_transaction_failed = riscv_cpu_do_transaction_failed, .do_unaligned_access = riscv_cpu_do_unaligned_access, @@ -315,6 +451,15 @@ static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) continue; } + /* + * cpu.debug = true is marked as 'sdtrig', priv spec 1.12. + * Skip this warning since existing CPUs with older priv + * spec and debug = true will be impacted. + */ + if (!strcmp(edata->name, "sdtrig")) { + continue; + } + isa_ext_update_enabled(cpu, edata->ext_enable_offset, false); /* @@ -581,7 +726,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) return; } - if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { + if (mcc->def->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { error_setg(errp, "Zcf extension is only relevant to RV32"); return; } @@ -678,7 +823,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) return; } - if (mcc->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) { + if (mcc->def->misa_mxl_max == MXL_RV32 && cpu->cfg.ext_svukte) { error_setg(errp, "svukte is not supported for RV32"); return; } @@ -694,6 +839,12 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) cpu->cfg.ext_ssctr = false; } + if (cpu->cfg.ext_svrsw60t59b && + (!cpu->cfg.mmu || mcc->def->misa_mxl_max == MXL_RV32)) { + error_setg(errp, "svrsw60t59b is not supported on RV32 and MMU-less platforms"); + return; + } + /* * Disable isa extensions based on priv spec after we * validated and set everything we need. 
@@ -706,8 +857,9 @@ static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu, RISCVCPUProfile *profile, bool send_warn) { - int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported); + int satp_max = cpu->cfg.max_satp_mode; + assert(satp_max >= 0); if (profile->satp_mode > satp_max) { if (send_warn) { bool is_32bit = riscv_cpu_is_32bit(cpu); @@ -730,16 +882,11 @@ static void riscv_cpu_check_parent_profile(RISCVCPU *cpu, RISCVCPUProfile *profile, RISCVCPUProfile *parent) { - const char *parent_name; - bool parent_enabled; - - if (!profile->enabled || !parent) { + if (!profile->present || !parent) { return; } - parent_name = parent->name; - parent_enabled = object_property_get_bool(OBJECT(cpu), parent_name, NULL); - profile->enabled = parent_enabled; + profile->present = parent->present; } static void riscv_cpu_validate_profile(RISCVCPU *cpu, @@ -800,7 +947,7 @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu, } } - profile->enabled = profile_impl; + profile->present = profile_impl; riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent); riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent); @@ -915,7 +1062,7 @@ static void cpu_enable_zc_implied_rules(RISCVCPU *cpu) cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true); cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true); - if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) { + if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) { cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); } } @@ -924,7 +1071,7 @@ static void cpu_enable_zc_implied_rules(RISCVCPU *cpu) if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) { cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true); - if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) { + if (riscv_has_ext(env, RVF) && mcc->def->misa_mxl_max == MXL_RV32) { cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true); } @@ -1029,6 +1176,70 @@ static bool riscv_cpu_is_generic(Object *cpu_obj) return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; } +static void riscv_cpu_set_profile(RISCVCPU *cpu, + RISCVCPUProfile *profile, + bool enabled) +{ + int i, ext_offset; + + if (profile->u_parent != NULL) { + riscv_cpu_set_profile(cpu, profile->u_parent, enabled); + } + + if (profile->s_parent != NULL) { + riscv_cpu_set_profile(cpu, profile->s_parent, enabled); + } + + profile->enabled = enabled; + + if (profile->enabled) { + cpu->env.priv_ver = profile->priv_spec; + +#ifndef CONFIG_USER_ONLY + if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) { + object_property_set_bool(OBJECT(cpu), "mmu", true, NULL); + const char *satp_prop = satp_mode_str(profile->satp_mode, + riscv_cpu_is_32bit(cpu)); + object_property_set_bool(OBJECT(cpu), satp_prop, true, NULL); + } +#endif + } + + for (i = 0; misa_bits[i] != 0; i++) { + uint32_t bit = misa_bits[i]; + + if (!(profile->misa_ext & bit)) { + continue; + } + + if (bit == RVI && !profile->enabled) { + /* + * Disabling profiles will not disable the base + * ISA RV64I. 
+ */ + continue; + } + + cpu_misa_ext_add_user_opt(bit, profile->enabled); + riscv_cpu_write_misa_bit(cpu, bit, profile->enabled); + } + + for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) { + ext_offset = profile->ext_offsets[i]; + + if (profile->enabled) { + if (cpu_cfg_offset_is_named_feat(ext_offset)) { + riscv_cpu_enable_named_feat(cpu, ext_offset); + } + + cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset); + } + + cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled); + isa_ext_update_enabled(cpu, ext_offset, profile->enabled); + } +} + /* * We'll get here via the following path: * @@ -1039,7 +1250,6 @@ static bool riscv_cpu_is_generic(Object *cpu_obj) static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp) { RISCVCPU *cpu = RISCV_CPU(cs); - RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); if (!riscv_cpu_tcg_compatible(cpu)) { g_autofree char *name = riscv_cpu_get_name(cpu); @@ -1048,7 +1258,10 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp) return false; } - if (mcc->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) { +#ifndef CONFIG_USER_ONLY + RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); + + if (mcc->def->misa_mxl_max >= MXL_RV128 && qemu_tcg_mttcg_enabled()) { /* Missing 128-bit aligned atomics */ error_setg(errp, "128-bit RISC-V currently does not work with Multi " @@ -1056,7 +1269,6 @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp) return false; } -#ifndef CONFIG_USER_ONLY CPURISCVState *env = &cpu->env; tcg_cflags_set(CPU(cs), CF_PCREL); @@ -1194,7 +1406,6 @@ static void cpu_set_profile(Object *obj, Visitor *v, const char *name, RISCVCPUProfile *profile = opaque; RISCVCPU *cpu = RISCV_CPU(obj); bool value; - int i, ext_offset; if (riscv_cpu_is_vendor(obj)) { error_setg(errp, "Profile %s is not available for vendor CPUs", @@ -1213,64 +1424,8 @@ static void cpu_set_profile(Object *obj, Visitor *v, const char *name, } profile->user_set = true; - profile->enabled = value; - - if (profile->u_parent != NULL) { - object_property_set_bool(obj, profile->u_parent->name, - profile->enabled, NULL); - } - if (profile->s_parent != NULL) { - object_property_set_bool(obj, profile->s_parent->name, - profile->enabled, NULL); - } - - if (profile->enabled) { - cpu->env.priv_ver = profile->priv_spec; - } - -#ifndef CONFIG_USER_ONLY - if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) { - object_property_set_bool(obj, "mmu", true, NULL); - const char *satp_prop = satp_mode_str(profile->satp_mode, - riscv_cpu_is_32bit(cpu)); - object_property_set_bool(obj, satp_prop, profile->enabled, NULL); - } -#endif - - for (i = 0; misa_bits[i] != 0; i++) { - uint32_t bit = misa_bits[i]; - - if (!(profile->misa_ext & bit)) { - continue; - } - - if (bit == RVI && !profile->enabled) { - /* - * Disabling profiles will not disable the base - * ISA RV64I. 
- */ - continue; - } - - cpu_misa_ext_add_user_opt(bit, profile->enabled); - riscv_cpu_write_misa_bit(cpu, bit, profile->enabled); - } - - for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) { - ext_offset = profile->ext_offsets[i]; - - if (profile->enabled) { - if (cpu_cfg_offset_is_named_feat(ext_offset)) { - riscv_cpu_enable_named_feat(cpu, ext_offset); - } - - cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset); - } - - cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled); - isa_ext_update_enabled(cpu, ext_offset, profile->enabled); - } + riscv_cpu_set_profile(cpu, profile, value); } static void cpu_get_profile(Object *obj, Visitor *v, const char *name, @@ -1285,7 +1440,7 @@ static void cpu_get_profile(Object *obj, Visitor *v, const char *name, static void riscv_cpu_add_profiles(Object *cpu_obj) { for (int i = 0; riscv_profiles[i] != NULL; i++) { - const RISCVCPUProfile *profile = riscv_profiles[i]; + RISCVCPUProfile *profile = riscv_profiles[i]; object_property_add(cpu_obj, profile->name, "bool", cpu_get_profile, cpu_set_profile, @@ -1297,30 +1452,11 @@ static void riscv_cpu_add_profiles(Object *cpu_obj) * case. */ if (profile->enabled) { - object_property_set_bool(cpu_obj, profile->name, true, NULL); + riscv_cpu_set_profile(RISCV_CPU(cpu_obj), profile, true); } } } -static bool cpu_ext_is_deprecated(const char *ext_name) -{ - return isupper(ext_name[0]); -} - -/* - * String will be allocated in the heap. Caller is responsible - * for freeing it. - */ -static char *cpu_ext_to_lower(const char *ext_name) -{ - char *ret = g_malloc0(strlen(ext_name) + 1); - - strcpy(ret, ext_name); - ret[0] = tolower(ret[0]); - - return ret; -} - static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { @@ -1333,13 +1469,6 @@ static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, return; } - if (cpu_ext_is_deprecated(multi_ext_cfg->name)) { - g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name); - - warn_report("CPU property '%s' is deprecated. Please use '%s' instead", - multi_ext_cfg->name, lower); - } - cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value); prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset); @@ -1375,14 +1504,13 @@ static void cpu_add_multi_ext_prop(Object *cpu_obj, const RISCVCPUMultiExtConfig *multi_cfg) { bool generic_cpu = riscv_cpu_is_generic(cpu_obj); - bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name); object_property_add(cpu_obj, multi_cfg->name, "bool", cpu_get_multi_ext_cfg, cpu_set_multi_ext_cfg, NULL, (void *)multi_cfg); - if (!generic_cpu || deprecated_ext) { + if (!generic_cpu) { return; } @@ -1425,8 +1553,6 @@ static void riscv_cpu_add_user_properties(Object *obj) riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts); riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts); - riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts); - riscv_cpu_add_profiles(obj); } @@ -1468,6 +1594,8 @@ static void riscv_init_max_cpu_extensions(Object *obj) if (env->misa_mxl != MXL_RV32) { isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false); + } else { + isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_svrsw60t59b), false); } /* @@ -1516,24 +1644,10 @@ static void riscv_tcg_cpu_instance_init(CPUState *cs) } } -static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc) -{ - /* - * All cpus use the same set of operations. 
- */ - cc->tcg_ops = &riscv_tcg_ops; -} - -static void riscv_tcg_cpu_class_init(CPUClass *cc) -{ - cc->init_accel_cpu = riscv_tcg_cpu_init_ops; -} - -static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data) +static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, const void *data) { AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); - acc->cpu_class_init = riscv_tcg_cpu_class_init; acc->cpu_instance_init = riscv_tcg_cpu_instance_init; acc->cpu_target_realize = riscv_tcg_cpu_realize; } diff --git a/target/riscv/tcg/tcg-cpu.h b/target/riscv/tcg/tcg-cpu.h index ce94253..a23716a 100644 --- a/target/riscv/tcg/tcg-cpu.h +++ b/target/riscv/tcg/tcg-cpu.h @@ -26,6 +26,8 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp); void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp); bool riscv_cpu_tcg_compatible(RISCVCPU *cpu); +extern const TCGCPUOps riscv_tcg_ops; + struct DisasContext; struct RISCVCPUConfig; typedef struct RISCVDecoder { diff --git a/target/riscv/th_csr.c b/target/riscv/th_csr.c index 6c970d4..49eb7bb 100644 --- a/target/riscv/th_csr.c +++ b/target/riscv/th_csr.c @@ -27,12 +27,6 @@ #define TH_SXSTATUS_MAEE BIT(21) #define TH_SXSTATUS_THEADISAEE BIT(22) -typedef struct { - int csrno; - int (*insertion_test)(RISCVCPU *cpu); - riscv_csr_operations csr_ops; -} riscv_csr; - static RISCVException smode(CPURISCVState *env, int csrno) { if (riscv_has_ext(env, RVS)) { @@ -42,13 +36,9 @@ static RISCVException smode(CPURISCVState *env, int csrno) return RISCV_EXCP_ILLEGAL_INST; } -static int test_thead_mvendorid(RISCVCPU *cpu) +static bool test_thead_mvendorid(RISCVCPU *cpu) { - if (cpu->cfg.mvendorid != THEAD_VENDOR_ID) { - return -1; - } - - return 0; + return cpu->cfg.mvendorid == THEAD_VENDOR_ID; } static RISCVException read_th_sxstatus(CPURISCVState *env, int csrno, @@ -59,21 +49,11 @@ static RISCVException read_th_sxstatus(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static riscv_csr th_csr_list[] = { +const RISCVCSR th_csr_list[] = { { .csrno = CSR_TH_SXSTATUS, .insertion_test = test_thead_mvendorid, .csr_ops = { "th.sxstatus", smode, read_th_sxstatus } - } + }, + { } }; - -void th_register_custom_csrs(RISCVCPU *cpu) -{ - for (size_t i = 0; i < ARRAY_SIZE(th_csr_list); i++) { - int csrno = th_csr_list[i].csrno; - riscv_csr_operations *csr_ops = &th_csr_list[i].csr_ops; - if (!th_csr_list[i].insertion_test(cpu)) { - riscv_set_csr_ops(csrno, csr_ops); - } - } -} diff --git a/target/riscv/time_helper.c b/target/riscv/time_helper.c index bc0d9a0..400e917 100644 --- a/target/riscv/time_helper.c +++ b/target/riscv/time_helper.c @@ -46,8 +46,23 @@ void riscv_timer_write_timecmp(CPURISCVState *env, QEMUTimer *timer, { uint64_t diff, ns_diff, next; RISCVAclintMTimerState *mtimer = env->rdtime_fn_arg; - uint32_t timebase_freq = mtimer->timebase_freq; - uint64_t rtc_r = env->rdtime_fn(env->rdtime_fn_arg) + delta; + uint32_t timebase_freq; + uint64_t rtc_r; + + if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn || + !env->rdtime_fn_arg || !get_field(env->menvcfg, MENVCFG_STCE)) { + /* S/VS Timer IRQ depends on sstc extension, rdtime_fn(), and STCE. */ + return; + } + + if (timer_irq == MIP_VSTIP && + (!riscv_has_ext(env, RVH) || !get_field(env->henvcfg, HENVCFG_STCE))) { + /* VS Timer IRQ also depends on RVH and henvcfg.STCE. 
*/ + return; + } + + timebase_freq = mtimer->timebase_freq; + rtc_r = env->rdtime_fn(env->rdtime_fn_arg) + delta; if (timecmp <= rtc_r) { /* @@ -125,6 +140,52 @@ void riscv_timer_write_timecmp(CPURISCVState *env, QEMUTimer *timer, timer_mod(timer, next); } +/* + * When disabling xenvcfg.STCE, the S/VS Timer may be disabled at the same time. + * It is safe to call this function regardless of whether the timer has been + * deleted or not. timer_del() will do nothing if the timer has already + * been deleted. + */ +static void riscv_timer_disable_timecmp(CPURISCVState *env, QEMUTimer *timer, + uint32_t timer_irq) +{ + /* Disable S-mode Timer IRQ and HW-based STIP */ + if ((timer_irq == MIP_STIP) && !get_field(env->menvcfg, MENVCFG_STCE)) { + riscv_cpu_update_mip(env, timer_irq, BOOL_TO_MASK(0)); + timer_del(timer); + return; + } + + /* Disable VS-mode Timer IRQ and HW-based VSTIP */ + if ((timer_irq == MIP_VSTIP) && + (!get_field(env->menvcfg, MENVCFG_STCE) || + !get_field(env->henvcfg, HENVCFG_STCE))) { + env->vstime_irq = 0; + riscv_cpu_update_mip(env, 0, BOOL_TO_MASK(0)); + timer_del(timer); + return; + } +} + +/* Enable or disable S/VS-mode Timer when xenvcfg.STCE is changed */ +void riscv_timer_stce_changed(CPURISCVState *env, bool is_m_mode, bool enable) +{ + if (enable) { + riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp, + env->htimedelta, MIP_VSTIP); + } else { + riscv_timer_disable_timecmp(env, env->vstimer, MIP_VSTIP); + } + + if (is_m_mode) { + if (enable) { + riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP); + } else { + riscv_timer_disable_timecmp(env, env->stimer, MIP_STIP); + } + } +} + void riscv_timer_init(RISCVCPU *cpu) { CPURISCVState *env; diff --git a/target/riscv/time_helper.h b/target/riscv/time_helper.h index cacd79b..af1f634 100644 --- a/target/riscv/time_helper.h +++ b/target/riscv/time_helper.h @@ -25,6 +25,7 @@ void riscv_timer_write_timecmp(CPURISCVState *env, QEMUTimer *timer, uint64_t timecmp, uint64_t delta, uint32_t timer_irq); +void riscv_timer_stce_changed(CPURISCVState *env, bool is_m_mode, bool enable); void riscv_timer_init(RISCVCPU *cpu); #endif diff --git a/target/riscv/translate.c b/target/riscv/translate.c index d6651f2..9ddef2d 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -20,10 +20,9 @@ #include "qemu/log.h" #include "cpu.h" #include "tcg/tcg-op.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" - +#include "exec/target_page.h" #include "exec/translator.h" #include "exec/translation-block.h" #include "exec/log.h" @@ -1210,11 +1209,6 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) /* The specification allows for longer insns, but not supported by qemu. */ #define MAX_INSN_LEN 4 -static inline int insn_len(uint16_t first_word) -{ - return (first_word & 3) == 3 ? 4 : 2; -} - const RISCVDecoder decoder_table[] = { { always_true_p, decode_insn32 }, { has_xthead_p, decode_xthead}, @@ -1223,13 +1217,35 @@ const RISCVDecoder decoder_table[] = { const size_t decoder_table_size = ARRAY_SIZE(decoder_table); -static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) +static void decode_opc(CPURISCVState *env, DisasContext *ctx) { + uint32_t opcode; + bool pc_is_4byte_align = ((ctx->base.pc_next % 4) == 0); + ctx->virt_inst_excp = false; - ctx->cur_insn_len = insn_len(opcode); + if (pc_is_4byte_align) { + /* + * Load 4 bytes at once so that the instruction fetch is atomic. + * + * Note: when pc is 4-byte aligned, a 4-byte instruction cannot + * cross a page boundary. We can preload 4 bytes regardless of + * whether the real instruction is 2 or 4 bytes long; the preload + * will not trigger an additional page fault. + */ + opcode = translator_ldl(env, &ctx->base, ctx->base.pc_next); + } else { + /* + * For an unaligned pc, preloading 4 bytes may trigger an additional + * page fault, so we only load 2 bytes here. + */ + opcode = (uint32_t) translator_lduw(env, &ctx->base, ctx->base.pc_next); + } + ctx->ol = ctx->xl; + + ctx->cur_insn_len = insn_len((uint16_t)opcode); /* Check for compressed insn */ if (ctx->cur_insn_len == 2) { - ctx->opcode = opcode; + ctx->opcode = (uint16_t)opcode; /* * The Zca extension is added as a way to refer to instructions in the C * extension that do not include the floating-point loads and stores @@ -1239,15 +1255,17 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) return; } } else { - uint32_t opcode32 = opcode; - opcode32 = deposit32(opcode32, 16, 16, - translator_lduw(env, &ctx->base, - ctx->base.pc_next + 2)); - ctx->opcode = opcode32; + if (!pc_is_4byte_align) { + /* Load the last 2 bytes of the instruction here */ + opcode = deposit32(opcode, 16, 16, + translator_lduw(env, &ctx->base, + ctx->base.pc_next + 2)); + } + ctx->opcode = opcode; for (guint i = 0; i < ctx->decoders->len; ++i) { riscv_cpu_decode_fn func = g_ptr_array_index(ctx->decoders, i); - if (func(ctx, opcode32)) { + if (func(ctx, opcode)) { return; } } @@ -1282,7 +1300,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->cfg_vta_all_1s = cpu->cfg.rvv_ta_all_1s; ctx->vstart_eq_zero = FIELD_EX32(tb_flags, TB_FLAGS, VSTART_EQ_ZERO); ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX); - ctx->misa_mxl_max = mcc->misa_mxl_max; + ctx->misa_mxl_max = mcc->def->misa_mxl_max; ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL); ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL); ctx->cs = cs; @@ -1325,10 +1343,8 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPURISCVState *env = cpu_env(cpu); - uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next); - ctx->ol = ctx->xl; - decode_opc(env, ctx, opcode16); + decode_opc(env, ctx); ctx->base.pc_next += ctx->cur_insn_len; /*
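For reference, the insn_len() helper that the reworked decode_opc() calls is the one removed from translate.c above; presumably it now lives in a shared header. Its logic is visible in the removed hunk: RISC-V encodes instruction length in the two low bits of the first halfword, with 0b11 meaning a full 32-bit instruction and anything else a 16-bit compressed one.

static inline int insn_len(uint16_t first_word)
{
    /* Low two bits == 0b11: standard 32-bit encoding; otherwise RVC. */
    return (first_word & 3) == 3 ? 4 : 2;
}

For example, c.addi a0, 1 (0x0505) has low bits 0b01 and decodes as 2 bytes, while addi a0, a0, 1 (0x00150513) ends in 0b11 and decodes as 4 bytes.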
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c index 1526de9..9a0d9b4 100644 --- a/target/riscv/vcrypto_helper.c +++ b/target/riscv/vcrypto_helper.c @@ -26,7 +26,6 @@ #include "crypto/aes-round.h" #include "crypto/sm4.h" #include "exec/memop.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" #include "internals.h" #include "vector_internals.h"
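The vector_helper.c changes that follow rework probe_pages() around the page-split arithmetic it already used: -(addr | TARGET_PAGE_MASK) is the number of bytes from addr to the end of its page, so any access longer than that must be probed as two pieces. A standalone illustration with hypothetical values (4 KiB pages, as in cpu-param.h; plain uint64_t standing in for target_ulong):

#include <inttypes.h>
#include <stdio.h>

#define PAGE_BITS 12                            /* 4 KiB pages */
#define PAGE_MASK (~(((uint64_t)1 << PAGE_BITS) - 1))

int main(void)
{
    uint64_t addr = 0x80000f00;
    /* addr | PAGE_MASK sets every bit above the page offset, so its
     * two's-complement negation is the distance to the page end. */
    uint64_t pagelen = -(addr | PAGE_MASK);
    printf("0x%" PRIx64 "\n", pagelen);         /* prints 0x100: 256 bytes left */
    return 0;
}

With len capped to that remainder on the first probe, the second probe (if any) starts exactly at the next page boundary, which is what the two probe_access calls in the function implement.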
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index 67b3baf..b41c29d 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -21,10 +21,13 @@ #include "qemu/bitops.h" #include "cpu.h" #include "exec/memop.h" -#include "exec/exec-all.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/probe.h" #include "exec/page-protection.h" #include "exec/helper-proto.h" +#include "exec/tlb-flags.h" +#include "exec/target_page.h" +#include "exec/tswap.h" #include "fpu/softfloat.h" #include "tcg/tcg-gvec-desc.h" #include "internals.h" @@ -32,7 +35,7 @@ #include <math.h> target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, - target_ulong s2) + target_ulong s2, target_ulong x0) { int vlmax, vl; RISCVCPU *cpu = env_archcpu(env); @@ -80,6 +83,16 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, } else { vl = vlmax; } + + if (cpu->cfg.rvv_vsetvl_x0_vill && x0 && (env->vl != vl)) { + /* only set vill bit. */ + env->vill = 1; + env->vtype = 0; + env->vl = 0; + env->vstart = 0; + return 0; + } + env->vl = vl; env->vtype = s2; env->vstart = 0; @@ -114,25 +127,42 @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz) * It will trigger an exception if there is no mapping in the TLB * and the page table walk can't fill the TLB entry. The guest * software can then return here after processing the exception, or never return. + * + * This function can also be used when direct access to probe_access_flags is + * needed in order to access the flags. If a pointer to a flags operand is + * provided, the function will call probe_access_flags instead, using nonfault + * and updating host and flags. */ -static void probe_pages(CPURISCVState *env, target_ulong addr, - target_ulong len, uintptr_t ra, - MMUAccessType access_type) +static void probe_pages(CPURISCVState *env, target_ulong addr, target_ulong len, + uintptr_t ra, MMUAccessType access_type, int mmu_index, + void **host, int *flags, bool nonfault) { target_ulong pagelen = -(addr | TARGET_PAGE_MASK); target_ulong curlen = MIN(pagelen, len); - int mmu_index = riscv_env_mmu_index(env, false); - probe_access(env, adjust_addr(env, addr), curlen, access_type, - mmu_index, ra); + if (flags != NULL) { + *flags = probe_access_flags(env, adjust_addr(env, addr), curlen, + access_type, mmu_index, nonfault, host, ra); + } else { + probe_access(env, adjust_addr(env, addr), curlen, access_type, + mmu_index, ra); + } + if (len > curlen) { addr += curlen; curlen = len - curlen; - probe_access(env, adjust_addr(env, addr), curlen, access_type, - mmu_index, ra); + if (flags != NULL) { + *flags = probe_access_flags(env, adjust_addr(env, addr), curlen, + access_type, mmu_index, nonfault, + host, ra); + } else { + probe_access(env, adjust_addr(env, addr), curlen, access_type, + mmu_index, ra); + } } } + static inline void vext_set_elem_mask(void *v0, int index, uint8_t value) { @@ -332,8 +362,8 @@ vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr, MMUAccessType access_type = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE; /* Check page permission/pmp/watchpoint/etc. */ - flags = probe_access_flags(env, adjust_addr(env, addr), size, access_type, - mmu_index, true, &host, ra); + probe_pages(env, addr, size, ra, access_type, mmu_index, &host, &flags, + true); if (flags == 0) { if (nf == 1) { @@ -632,7 +662,7 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env, uint32_t vma = vext_vma(desc); target_ulong addr, addr_probe, addr_i, offset, remain, page_split, elems; int mmu_index = riscv_env_mmu_index(env, false); - int flags; + int flags, probe_flags; void *host; VSTART_CHECK_EARLY_EXIT(env, env->vl); @@ -646,15 +676,15 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env, } /* Check page permission/pmp/watchpoint/etc. */ - flags = probe_access_flags(env, adjust_addr(env, addr), elems * msize, - MMU_DATA_LOAD, mmu_index, true, &host, ra); + probe_pages(env, addr, elems * msize, ra, MMU_DATA_LOAD, mmu_index, &host, + &flags, true); /* If we are crossing a page, also check the second page. 
*/ if (env->vl > elems) { addr_probe = addr + (elems << log2_esz); - flags |= probe_access_flags(env, adjust_addr(env, addr_probe), - elems * msize, MMU_DATA_LOAD, mmu_index, - true, &host, ra); + probe_pages(env, addr_probe, elems * msize, ra, MMU_DATA_LOAD, + mmu_index, &host, &probe_flags, true); + flags |= probe_flags; } if (flags & ~TLB_WATCHPOINT) { @@ -666,16 +696,16 @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env, addr_i = adjust_addr(env, base + i * (nf << log2_esz)); if (i == 0) { /* Allow fault on first element. */ - probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD); + probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD, + mmu_index, &host, NULL, false); } else { remain = nf << log2_esz; while (remain > 0) { offset = -(addr_i | TARGET_PAGE_MASK); /* Probe nonfault on subsequent elements. */ - flags = probe_access_flags(env, addr_i, offset, - MMU_DATA_LOAD, mmu_index, true, - &host, 0); + probe_pages(env, addr_i, offset, 0, MMU_DATA_LOAD, + mmu_index, &host, &flags, true); /* * Stop if invalid (unmapped) or mmio (transaction may @@ -5113,9 +5143,11 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ } \ \ for (i = i_max; i < vl; ++i) { \ - if (vm || vext_elem_mask(v0, i)) { \ - *((ETYPE *)vd + H(i)) = 0; \ + if (!vm && !vext_elem_mask(v0, i)) { \ + vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \ + continue; \ } \ + *((ETYPE *)vd + H(i)) = 0; \ } \ \ env->vstart = 0; \ diff --git a/target/riscv/zce_helper.c b/target/riscv/zce_helper.c index b433bda..55221f5 100644 --- a/target/riscv/zce_helper.c +++ b/target/riscv/zce_helper.c @@ -18,9 +18,8 @@ #include "qemu/osdep.h" #include "cpu.h" -#include "exec/exec-all.h" #include "exec/helper-proto.h" -#include "exec/cpu_ldst.h" +#include "accel/tcg/cpu-ldst.h" target_ulong HELPER(cm_jalt)(CPURISCVState *env, uint32_t index) { |
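As a closing note on the pmp.c refactor: the new pmp_get_smepmp_operation() helper shown above packs the pmpcfg L/R/W/X bits into the 4-bit truth-table index that both pmp_is_invalid_smepmp_cfg() and pmp_hart_has_privs() now share, instead of each open-coding the shuffle. A worked example follows; the PMP_* bit values are quoted from target/riscv/pmp.h as I recall them, so treat them as an assumption.

#include <stdint.h>

#define PMP_READ  0x01   /* assumed values, as in target/riscv/pmp.h */
#define PMP_WRITE 0x02
#define PMP_EXEC  0x04
#define PMP_LOCK  0x80

/* Same packing as the helper in the diff: index bits are L:R:W:X. */
static inline uint8_t pmp_get_smepmp_operation(uint8_t cfg)
{
    return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) |
           (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2);
}

/*
 * Example: a locked executable entry, cfg = PMP_LOCK | PMP_EXEC = 0x84,
 * maps to operation 9, one of the encodings (9, 10, 11, 13) that
 * pmp_is_invalid_smepmp_cfg() rejects while mseccfg.MML is set.
 */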