Diffstat (limited to 'target')
-rw-r--r--   target/riscv/cpu.c                              |   2
-rw-r--r--   target/riscv/cpu.h                              |   3
-rw-r--r--   target/riscv/csr.c                              |   5
-rw-r--r--   target/riscv/helper.h                           |  21
-rw-r--r--   target/riscv/insn_trans/trans_rvv.c.inc         |  50
-rw-r--r--   target/riscv/insn_trans/trans_rvzce.c.inc       |   6
-rw-r--r--   target/riscv/insn_trans/trans_rvzicfiss.c.inc   |   9
-rw-r--r--   target/riscv/kvm/kvm-cpu.c                      |   2
-rw-r--r--   target/riscv/machine.c                          |  28
-rw-r--r--   target/riscv/op_helper.c                        |  49
-rw-r--r--   target/riscv/riscv-qmp-cmds.c                   | 148
-rw-r--r--   target/riscv/tcg/tcg-cpu.c                      |  21
-rw-r--r--   target/riscv/translate.c                        |   3
-rw-r--r--   target/riscv/vector_helper.c                    |  20
14 files changed, 335 insertions, 32 deletions
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index d055ddf..a877018 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -604,7 +604,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
             }
         }
     }
-    if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
+    if (riscv_cpu_cfg(env)->ext_zve32x && (flags & CPU_DUMP_VPU)) {
         static const int dump_rvv_csrs[] = {
                     CSR_VSTART,
                     CSR_VXSAT,
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 4a862da..2c22664 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -592,6 +592,7 @@ static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
 extern const char * const riscv_int_regnames[];
 extern const char * const riscv_int_regnamesh[];
 extern const char * const riscv_fpr_regnames[];
+extern const char * const riscv_rvv_regnames[];
 
 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
 int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
@@ -873,7 +874,7 @@ static inline void riscv_csr_write(CPURISCVState *env, int csrno,
 static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno)
 {
     target_ulong val = 0;
-    riscv_csrrw(env, csrno, &val, 0, 0, 0);
+    riscv_csrr(env, csrno, &val);
     return val;
 }
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 3c8989f..5c91658 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -203,6 +203,8 @@ static RISCVException cfi_ss(CPURISCVState *env, int csrno)
 #if !defined(CONFIG_USER_ONLY)
     if (env->debugger) {
         return RISCV_EXCP_NONE;
+    } else if (env->virt_enabled) {
+        return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
     }
 #endif
     return RISCV_EXCP_ILLEGAL_INST;
@@ -2003,7 +2005,8 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
     if (riscv_has_ext(env, RVF)) {
         mask |= MSTATUS_FS;
     }
-    if (riscv_has_ext(env, RVV)) {
+
+    if (riscv_cpu_cfg(env)->ext_zve32x) {
         mask |= MSTATUS_VS;
     }
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index f712b1c..b785456 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1101,14 +1101,14 @@ DEF_HELPER_6(vslidedown_vx_b, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vslidedown_vx_h, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vslidedown_vx_w, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vslidedown_vx_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1up_vx_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1up_vx_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1up_vx_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1up_vx_d, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1down_vx_b, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32)
-DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_b, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_d, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_b, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, i64, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, i64, ptr, env, i32)
 
 DEF_HELPER_6(vfslide1up_vf_h, void, ptr, ptr, i64, ptr, env, i32)
 DEF_HELPER_6(vfslide1up_vf_w, void, ptr, ptr, i64, ptr, env, i32)
@@ -1284,3 +1284,8 @@ DEF_HELPER_4(vgmul_vv, void, ptr, ptr, env, i32)
 DEF_HELPER_5(vsm4k_vi, void, ptr, ptr, i32, env, i32)
 DEF_HELPER_4(vsm4r_vv, void, ptr, ptr, env, i32)
 DEF_HELPER_4(vsm4r_vs, void, ptr, ptr, env, i32)
+
+/* CFI (zicfiss) helpers */
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_1(ssamoswap_disabled, void, env)
+#endif
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 71f98fb..f4b5460 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -3561,7 +3561,6 @@ static bool slideup_check(DisasContext *s, arg_rmrr *a)
 }
 
 GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
-GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
 GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check)
 
 static bool slidedown_check(DisasContext *s, arg_rmrr *a)
@@ -3572,9 +3571,56 @@ static bool slidedown_check(DisasContext *s, arg_rmrr *a)
 }
 
 GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
-GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
 GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
 
+typedef void gen_helper_vslide1_vx(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
+                                   TCGv_env, TCGv_i32);
+
+#define GEN_OPIVX_VSLIDE1_TRANS(NAME, CHECK)                             \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
+{                                                                        \
+    if (CHECK(s, a)) {                                                   \
+        static gen_helper_vslide1_vx * const fns[4] = {                  \
+            gen_helper_##NAME##_b, gen_helper_##NAME##_h,                \
+            gen_helper_##NAME##_w, gen_helper_##NAME##_d,                \
+        };                                                               \
+                                                                         \
+        TCGv_ptr dest, src2, mask;                                       \
+        TCGv_i64 src1;                                                   \
+        TCGv_i32 desc;                                                   \
+        uint32_t data = 0;                                               \
+                                                                         \
+        dest = tcg_temp_new_ptr();                                       \
+        mask = tcg_temp_new_ptr();                                       \
+        src2 = tcg_temp_new_ptr();                                       \
+        src1 = tcg_temp_new_i64();                                       \
+                                                                         \
+        data = FIELD_DP32(data, VDATA, VM, a->vm);                       \
+        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);                   \
+        data = FIELD_DP32(data, VDATA, VTA, s->vta);                     \
+        data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);   \
+        data = FIELD_DP32(data, VDATA, VMA, s->vma);                     \
+        desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,             \
+                                          s->cfg_ptr->vlenb, data));     \
+                                                                         \
+        tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));             \
+        tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));            \
+        tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));                 \
+        tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));          \
+                                                                         \
+        fns[s->sew](dest, mask, src1, src2, tcg_env, desc);              \
+                                                                         \
+        tcg_gen_movi_tl(cpu_vstart, 0);                                  \
+        finalize_rvv_inst(s);                                            \
+                                                                         \
+        return true;                                                     \
+    }                                                                    \
+    return false;                                                        \
+}
+
+GEN_OPIVX_VSLIDE1_TRANS(vslide1up_vx, slideup_check)
+GEN_OPIVX_VSLIDE1_TRANS(vslide1down_vx, slidedown_check)
+
 /* Vector Floating-Point Slide Instructions */
 static bool fslideup_check(DisasContext *s, arg_rmrr *a)
 {
diff --git a/target/riscv/insn_trans/trans_rvzce.c.inc b/target/riscv/insn_trans/trans_rvzce.c.inc
index c77c2b9..dd15af0 100644
--- a/target/riscv/insn_trans/trans_rvzce.c.inc
+++ b/target/riscv/insn_trans/trans_rvzce.c.inc
@@ -88,13 +88,13 @@ static bool trans_c_lbu(DisasContext *ctx, arg_c_lbu *a)
 static bool trans_c_lhu(DisasContext *ctx, arg_c_lhu *a)
 {
     REQUIRE_ZCB(ctx);
-    return gen_load(ctx, a, MO_UW);
+    return gen_load(ctx, a, MO_TEUW);
 }
 
 static bool trans_c_lh(DisasContext *ctx, arg_c_lh *a)
 {
     REQUIRE_ZCB(ctx);
-    return gen_load(ctx, a, MO_SW);
+    return gen_load(ctx, a, MO_TESW);
 }
 
 static bool trans_c_sb(DisasContext *ctx, arg_c_sb *a)
@@ -106,7 +106,7 @@ static bool trans_c_sb(DisasContext *ctx, arg_c_sb *a)
 static bool trans_c_sh(DisasContext *ctx, arg_c_sh *a)
 {
     REQUIRE_ZCB(ctx);
-    return gen_store(ctx, a, MO_UW);
+    return gen_store(ctx, a, MO_TEUW);
 }
 
 #define X_S0 8
diff --git a/target/riscv/insn_trans/trans_rvzicfiss.c.inc b/target/riscv/insn_trans/trans_rvzicfiss.c.inc
index b0096ad..f4a1c12 100644
--- a/target/riscv/insn_trans/trans_rvzicfiss.c.inc
+++ b/target/riscv/insn_trans/trans_rvzicfiss.c.inc
@@ -40,6 +40,7 @@ static bool trans_sspopchk(DisasContext *ctx, arg_sspopchk *a)
     tcg_gen_brcond_tl(TCG_COND_EQ, data, rs1, skip);
     tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_BCFI_TVAL), tcg_env,
                   offsetof(CPURISCVState, sw_check_code));
+    gen_update_pc(ctx, 0);
     gen_helper_raise_exception(tcg_env,
                                tcg_constant_i32(RISCV_EXCP_SW_CHECK));
     gen_set_label(skip);
@@ -90,7 +91,11 @@ static bool trans_ssamoswap_w(DisasContext *ctx, arg_amoswap_w *a)
     }
 
     if (!ctx->bcfi_enabled) {
+#ifndef CONFIG_USER_ONLY
+        gen_helper_ssamoswap_disabled(tcg_env);
+#else
         return false;
+#endif
     }
 
     TCGv dest = dest_gpr(ctx, a->rd);
@@ -115,7 +120,11 @@ static bool trans_ssamoswap_d(DisasContext *ctx, arg_amoswap_w *a)
     }
 
     if (!ctx->bcfi_enabled) {
+#ifndef CONFIG_USER_ONLY
+        gen_helper_ssamoswap_disabled(tcg_env);
+#else
         return false;
+#endif
     }
 
     TCGv dest = dest_gpr(ctx, a->rd);
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index 5c19062..187c2c9 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -1588,7 +1588,7 @@ static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run)
          * Handle the case where a 32 bit CPU is running in a
          * 64 bit addressing env.
          */
-        if (riscv_cpu_mxl(&cpu->env) == MXL_RV32) {
+        if (riscv_cpu_is_32bit(cpu)) {
             addr |= (uint64_t)run->riscv_sbi.args[2] << 32;
         }
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index 1600ec4..18d790a 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -131,7 +131,8 @@ static bool vector_needed(void *opaque)
     RISCVCPU *cpu = opaque;
     CPURISCVState *env = &cpu->env;
 
-    return riscv_has_ext(env, RVV);
+    return kvm_enabled() ? riscv_has_ext(env, RVV) :
+                           riscv_cpu_cfg(env)->ext_zve32x;
 }
 
 static const VMStateDescription vmstate_vector = {
@@ -400,6 +401,30 @@ static const VMStateDescription vmstate_ssp = {
     }
 };
 
+static bool sstc_timer_needed(void *opaque)
+{
+    RISCVCPU *cpu = opaque;
+    CPURISCVState *env = &cpu->env;
+
+    if (!cpu->cfg.ext_sstc) {
+        return false;
+    }
+
+    return env->stimer != NULL || env->vstimer != NULL;
+}
+
+static const VMStateDescription vmstate_sstc = {
+    .name = "cpu/timer",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = sstc_timer_needed,
+    .fields = (const VMStateField[]) {
+        VMSTATE_TIMER_PTR(env.stimer, RISCVCPU),
+        VMSTATE_TIMER_PTR(env.vstimer, RISCVCPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 const VMStateDescription vmstate_riscv_cpu = {
     .name = "cpu",
     .version_id = 10,
@@ -476,6 +501,7 @@ const VMStateDescription vmstate_riscv_cpu = {
         &vmstate_elp,
         &vmstate_ssp,
         &vmstate_ctr,
+        &vmstate_sstc,
         NULL
     }
 };
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 110292e..8382aa9 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -717,4 +717,53 @@ target_ulong helper_hyp_hlvx_wu(CPURISCVState *env, target_ulong addr)
     return cpu_ldl_code_mmu(env, addr, oi, ra);
 }
 
+void helper_ssamoswap_disabled(CPURISCVState *env)
+{
+    int exception = RISCV_EXCP_ILLEGAL_INST;
+
+    /*
+     * Here we follow the RISC-V CFI spec [1] to implement the exception type
+     * of the ssamoswap* instructions.
+     *
+     * [1] RISC-V CFI spec v1.0, ch2.7 Atomic Swap from a Shadow Stack Location
+     *
+     * Note: We have already checked some conditions in trans_* functions:
+     * 1. The effective priv mode is not M-mode.
+     * 2. The xSSE specific to the effective priv mode is disabled.
+     */
+    if (!get_field(env->menvcfg, MENVCFG_SSE)) {
+        /*
+         * Disabled M-mode SSE always triggers an illegal instruction
+         * exception when the current priv mode is not M-mode.
+         */
+        exception = RISCV_EXCP_ILLEGAL_INST;
+        goto done;
+    }
+
+    if (!riscv_has_ext(env, RVS)) {
+        /* S-mode is not implemented */
+        exception = RISCV_EXCP_ILLEGAL_INST;
+        goto done;
+    } else if (env->virt_enabled) {
+        /*
+         * VU/VS-mode with disabled xSSE will trigger the virtual instruction
+         * exception.
+         */
+        exception = RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
+        goto done;
+    } else {
+        /*
+         * U-mode with disabled S-mode SSE will trigger the illegal instruction
+         * exception.
+         *
+         * Note: S-mode is already handled in the disabled M-mode SSE case.
+         */
+        exception = RISCV_EXCP_ILLEGAL_INST;
+        goto done;
+    }
+
+done:
+    riscv_raise_exception(env, exception, GETPC());
+}
+
 #endif /* !CONFIG_USER_ONLY */
diff --git a/target/riscv/riscv-qmp-cmds.c b/target/riscv/riscv-qmp-cmds.c
index 8a1856c..c499f9b 100644
--- a/target/riscv/riscv-qmp-cmds.c
+++ b/target/riscv/riscv-qmp-cmds.c
@@ -31,6 +31,10 @@
 #include "qapi/qobject-input-visitor.h"
 #include "qapi/visitor.h"
 #include "qom/qom-qobject.h"
+#include "qemu/ctype.h"
+#include "qemu/qemu-print.h"
+#include "monitor/hmp.h"
+#include "monitor/hmp-target.h"
 #include "system/kvm.h"
 #include "system/tcg.h"
 #include "cpu-qom.h"
@@ -240,3 +244,147 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type,
 
     return expansion_info;
 }
+
+/*
+ * We have way too many potential CSRs and regs being added
+ * regularly to register them in a static array.
+ *
+ * Declare an empty array instead, making get_monitor_def() use
+ * the target_get_monitor_def() API directly.
+ */
+const MonitorDef monitor_defs[] = { { } };
+const MonitorDef *target_monitor_defs(void)
+{
+    return monitor_defs;
+}
+
+static bool reg_is_ulong_integer(CPURISCVState *env, const char *name,
+                                 target_ulong *val, bool is_gprh)
+{
+    const char * const *reg_names;
+    target_ulong *vals;
+
+    if (is_gprh) {
+        reg_names = riscv_int_regnamesh;
+        vals = env->gprh;
+    } else {
+        reg_names = riscv_int_regnames;
+        vals = env->gpr;
+    }
+
+    for (int i = 0; i < 32; i++) {
+        g_autofree char *reg_name = g_strdup(reg_names[i]);
+        char *reg1 = strtok(reg_name, "/");
+        char *reg2 = strtok(NULL, "/");
+
+        if (strcasecmp(reg1, name) == 0 ||
+            (reg2 && strcasecmp(reg2, name) == 0)) {
+            *val = vals[i];
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static bool reg_is_u64_fpu(CPURISCVState *env, const char *name, uint64_t *val)
+{
+    if (qemu_tolower(name[0]) != 'f') {
+        return false;
+    }
+
+    for (int i = 0; i < 32; i++) {
+        g_autofree char *reg_name = g_strdup(riscv_fpr_regnames[i]);
+        char *reg1 = strtok(reg_name, "/");
+        char *reg2 = strtok(NULL, "/");
+
+        if (strcasecmp(reg1, name) == 0 ||
+            (reg2 && strcasecmp(reg2, name) == 0)) {
+            *val = env->fpr[i];
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static bool reg_is_vreg(const char *name)
+{
+    if (qemu_tolower(name[0]) != 'v' || strlen(name) > 3) {
+        return false;
+    }
+
+    for (int i = 0; i < 32; i++) {
+        if (strcasecmp(name, riscv_rvv_regnames[i]) == 0) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval)
+{
+    CPURISCVState *env = &RISCV_CPU(cs)->env;
+    target_ulong val = 0;
+    uint64_t val64 = 0;
+    int i;
+
+    if (reg_is_ulong_integer(env, name, &val, false) ||
+        reg_is_ulong_integer(env, name, &val, true)) {
+        *pval = val;
+        return 0;
+    }
+
+    if (reg_is_u64_fpu(env, name, &val64)) {
+        *pval = val64;
+        return 0;
+    }
+
+    if (reg_is_vreg(name)) {
+        if (!riscv_cpu_cfg(env)->ext_zve32x) {
+            return -EINVAL;
+        }
+
+        qemu_printf("Unable to print the value of vector "
+                    "vreg '%s' from this API\n", name);
+
+        /*
+         * We're returning 0 because returning -EINVAL triggers
+         * an 'unknown register' message in exp_unary() later,
+         * which feels awkward after our own error message.
+         */
+        *pval = 0;
+        return 0;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(csr_ops); i++) {
+        RISCVException res;
+        int csrno = i;
+
+        /*
+         * Early skip when possible since we're going
+         * through a lot of NULL entries.
+         */
+        if (csr_ops[csrno].predicate == NULL) {
+            continue;
+        }
+
+        if (strcasecmp(csr_ops[csrno].name, name) != 0) {
+            continue;
+        }
+
+        res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
+
+        /*
+         * Rely on the smode, hmode, etc. predicates within csr.c
+         * to do the filtering of the registers that are present.
+         */
+        if (res == RISCV_EXCP_NONE) {
+            *pval = val;
+            return 0;
+        }
+    }
+
+    return -EINVAL;
+}
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index 143ab07..1150bd1 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -417,12 +417,21 @@ static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
 static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                  Error **errp)
 {
+    uint32_t min_vlen;
     uint32_t vlen = cfg->vlenb << 3;
 
-    if (vlen > RV_VLEN_MAX || vlen < 128) {
+    if (riscv_has_ext(env, RVV)) {
+        min_vlen = 128;
+    } else if (cfg->ext_zve64x) {
+        min_vlen = 64;
+    } else if (cfg->ext_zve32x) {
+        min_vlen = 32;
+    }
+
+    if (vlen > RV_VLEN_MAX || vlen < min_vlen) {
         error_setg(errp,
                    "Vector extension implementation only supports VLEN "
-                   "in the range [128, %d]", RV_VLEN_MAX);
+                   "in the range [%d, %d]", min_vlen, RV_VLEN_MAX);
         return;
     }
 
@@ -432,6 +441,12 @@ static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                    "in the range [8, 64]");
         return;
     }
+
+    if (vlen < cfg->elen) {
+        error_setg(errp, "Vector extension implementation requires VLEN "
+                         "to be greater than or equal to ELEN");
+        return;
+    }
 }
 
 static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
@@ -661,7 +676,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         return;
     }
 
-    if (riscv_has_ext(env, RVV)) {
+    if (cpu->cfg.ext_zve32x) {
         riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
         if (local_err != NULL) {
             error_propagate(errp, local_err);
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 9ddef2d..6fc06c7 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -24,6 +24,7 @@
 #include "exec/helper-gen.h"
 #include "exec/target_page.h"
 #include "exec/translator.h"
+#include "accel/tcg/cpu-ldst.h"
 #include "exec/translation-block.h"
 #include "exec/log.h"
 #include "semihosting/semihost.h"
@@ -1166,7 +1167,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
     CPUState *cpu = ctx->cs;
     CPURISCVState *env = cpu_env(cpu);
 
-    return translator_ldl(env, &ctx->base, pc);
+    return cpu_ldl_code(env, pc);
 }
 
 #define SS_MMU_INDEX(ctx) (ctx->mem_idx | MMU_IDX_SS_WRITE)
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 7c67d67..41ea223 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -5198,11 +5198,11 @@ GEN_VEXT_VSLIE1UP(16, H2)
 GEN_VEXT_VSLIE1UP(32, H4)
 GEN_VEXT_VSLIE1UP(64, H8)
 
-#define GEN_VEXT_VSLIDE1UP_VX(NAME, BITWIDTH)                      \
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
-                  CPURISCVState *env, uint32_t desc)              \
-{                                                                  \
-    vslide1up_##BITWIDTH(vd, v0, s1, vs2, env, desc);              \
+#define GEN_VEXT_VSLIDE1UP_VX(NAME, BITWIDTH)                  \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2,  \
+                  CPURISCVState *env, uint32_t desc)           \
+{                                                              \
+    vslide1up_##BITWIDTH(vd, v0, s1, vs2, env, desc);          \
 }
 
 /* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
@@ -5249,11 +5249,11 @@ GEN_VEXT_VSLIDE1DOWN(16, H2)
 GEN_VEXT_VSLIDE1DOWN(32, H4)
 GEN_VEXT_VSLIDE1DOWN(64, H8)
 
-#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, BITWIDTH)                    \
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
-                  CPURISCVState *env, uint32_t desc)              \
-{                                                                  \
-    vslide1down_##BITWIDTH(vd, v0, s1, vs2, env, desc);            \
+#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, BITWIDTH)                \
+void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2,  \
+                  CPURISCVState *env, uint32_t desc)           \
+{                                                              \
+    vslide1down_##BITWIDTH(vd, v0, s1, vs2, env, desc);        \
 }
 
 /* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */