Diffstat (limited to 'target')
-rw-r--r-- | target/arm/cpu.h | 2
-rw-r--r-- | target/arm/kvm.c | 80
-rw-r--r-- | target/arm/kvm32.c | 34
-rw-r--r-- | target/arm/kvm64.c | 49
-rw-r--r-- | target/arm/kvm_arm.h | 10
-rw-r--r-- | target/arm/translate-a64.c | 6
-rw-r--r-- | target/arm/translate-a64.h | 1
-rw-r--r-- | target/arm/translate-sve.c | 8
-rw-r--r-- | target/riscv/Makefile.objs | 2
-rw-r--r-- | target/riscv/cpu.c | 50
-rw-r--r-- | target/riscv/cpu.h | 82
-rw-r--r-- | target/riscv/cpu_bits.h | 15
-rw-r--r-- | target/riscv/csr.c | 75
-rw-r--r-- | target/riscv/fpu_helper.c | 33
-rw-r--r-- | target/riscv/helper.h | 1069
-rw-r--r-- | target/riscv/insn32-64.decode | 11
-rw-r--r-- | target/riscv/insn32.decode | 372
-rw-r--r-- | target/riscv/insn_trans/trans_rvv.inc.c | 2888
-rw-r--r-- | target/riscv/internals.h | 41
-rw-r--r-- | target/riscv/translate.c | 27
-rw-r--r-- | target/riscv/vector_helper.c | 4899
-rw-r--r-- | target/s390x/insn-data.def | 2
22 files changed, 9705 insertions, 51 deletions
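The arm/kvm hunks below wire up KVM_EXIT_ARM_NISV handling: QEMU asks KVM to inject an external data abort and, on the next pre-run, verifies via the guest's data fault status register that the abort really landed. A minimal standalone sketch of that status-code check, reusing the DFSR_FSC/DFSC_EXTABT encoding from the kvm32.c hunk; the sample DFSR values here are hypothetical, chosen only to exercise both translation formats:

#include <stdio.h>
#include <stdint.h>

/* Fault-status extraction, as in the target/arm/kvm32.c hunk below:
 * short-descriptor format keeps FS in DFSR[10] and DFSR[3:0],
 * LPAE format keeps it in DFSR[5:0]. */
#define DFSR_FSC(lpae, v)  ((lpae) ? ((v) & 0x3F) : (((v) >> 6) | ((v) & 0x1F)))
#define DFSC_EXTABT(lpae)  ((lpae) ? 0x10 : 0x08)

int main(void)
{
    /* Hypothetical DFSR values carrying a synchronous external abort. */
    uint32_t dfsr_lpae  = 0x10;   /* LPAE (TTBCR.EAE == 1) */
    uint32_t dfsr_short = 0x08;   /* short descriptor (TTBCR.EAE == 0) */

    printf("lpae  pending ext abort: %s\n",
           DFSR_FSC(1, dfsr_lpae) == DFSC_EXTABT(1) ? "yes" : "no");
    printf("short pending ext abort: %s\n",
           DFSR_FSC(0, dfsr_short) == DFSC_EXTABT(0) ? "yes" : "no");
    return 0;
}

Compiled with any C compiler, both checks report a pending external abort, matching the fault status codes 0x10 (LPAE) and 0x08 (short descriptor) that kvm_arm_verify_ext_dabt_pending() expects in the hunks below.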
diff --git a/target/arm/cpu.h b/target/arm/cpu.h index cf99dcc..9e8ed42 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -573,6 +573,8 @@ typedef struct CPUARMState { uint64_t esr; } serror; + uint8_t ext_dabt_raised; /* Tracking/verifying injection of ext DABT */ + /* State of our input IRQ/FIQ/VIRQ/VFIQ lines */ uint32_t irq_line_state; diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 7c672c7..8bb7318 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -39,6 +39,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = { static bool cap_has_mp_state; static bool cap_has_inject_serror_esr; +static bool cap_has_inject_ext_dabt; static ARMHostCPUFeatures arm_host_cpu_features; @@ -245,6 +246,16 @@ int kvm_arch_init(MachineState *ms, KVMState *s) ret = -EINVAL; } + if (kvm_check_extension(s, KVM_CAP_ARM_NISV_TO_USER)) { + if (kvm_vm_enable_cap(s, KVM_CAP_ARM_NISV_TO_USER, 0)) { + error_report("Failed to enable KVM_CAP_ARM_NISV_TO_USER cap"); + } else { + /* Set status for supporting the external dabt injection */ + cap_has_inject_ext_dabt = kvm_check_extension(s, + KVM_CAP_ARM_INJECT_EXT_DABT); + } + } + return ret; } @@ -738,6 +749,29 @@ int kvm_get_vcpu_events(ARMCPU *cpu) void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) { + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + if (unlikely(env->ext_dabt_raised)) { + /* + * Verifying that the ext DABT has been properly injected, + * otherwise risking indefinitely re-running the faulting instruction + * Covering a very narrow case for kernels 5.5..5.5.4 + * when injected abort was misconfigured to be + * an IMPLEMENTATION DEFINED exception (for 32-bit EL1) + */ + if (!arm_feature(env, ARM_FEATURE_AARCH64) && + unlikely(!kvm_arm_verify_ext_dabt_pending(cs))) { + + error_report("Data abort exception with no valid ISS generated by " + "guest memory access. KVM unable to emulate faulting " + "instruction. Failed to inject an external data abort " + "into the guest."); + abort(); + } + /* Clear the status */ + env->ext_dabt_raised = 0; + } } MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) @@ -810,6 +844,47 @@ void kvm_arm_vm_state_change(void *opaque, int running, RunState state) } } +/** + * kvm_arm_handle_dabt_nisv: + * @cs: CPUState + * @esr_iss: ISS encoding (limited) for the exception from Data Abort + * ISV bit set to '0b0' -> no valid instruction syndrome + * @fault_ipa: faulting address for the synchronous data abort + * + * Returns: 0 if the exception has been handled, < 0 otherwise + */ +static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, + uint64_t fault_ipa) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + /* + * Request KVM to inject the external data abort into the guest + */ + if (cap_has_inject_ext_dabt) { + struct kvm_vcpu_events events = { }; + /* + * The external data abort event will be handled immediately by KVM + * using the address fault that triggered the exit on given VCPU. + * Requesting injection of the external data abort does not rely + * on any other VCPU state. Therefore, in this particular case, the VCPU + * synchronization can be exceptionally skipped. 
+ */ + events.exception.ext_dabt_pending = 1; + /* KVM_CAP_ARM_INJECT_EXT_DABT implies KVM_CAP_VCPU_EVENTS */ + if (!kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events)) { + env->ext_dabt_raised = 1; + return 0; + } + } else { + error_report("Data abort exception triggered by guest memory access " + "at physical address: 0x" TARGET_FMT_lx, + (target_ulong)fault_ipa); + error_printf("KVM unable to emulate faulting instruction.\n"); + } + return -1; +} + int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) { int ret = 0; @@ -820,6 +895,11 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) ret = EXCP_DEBUG; } /* otherwise return to guest */ break; + case KVM_EXIT_ARM_NISV: + /* External DABT with no valid iss to decode */ + ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss, + run->arm_nisv.fault_ipa); + break; default: qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n", __func__, run->exit_reason); diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c index 7b3a19e..0af46b4 100644 --- a/target/arm/kvm32.c +++ b/target/arm/kvm32.c @@ -559,3 +559,37 @@ void kvm_arm_pmu_init(CPUState *cs) { qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__); } + +#define ARM_REG_DFSR ARM_CP15_REG32(0, 5, 0, 0) +#define ARM_REG_TTBCR ARM_CP15_REG32(0, 2, 0, 2) +/* + *DFSR: + * TTBCR.EAE == 0 + * FS[4] - DFSR[10] + * FS[3:0] - DFSR[3:0] + * TTBCR.EAE == 1 + * FS, bits [5:0] + */ +#define DFSR_FSC(lpae, v) \ + ((lpae) ? ((v) & 0x3F) : (((v) >> 6) | ((v) & 0x1F))) + +#define DFSC_EXTABT(lpae) ((lpae) ? 0x10 : 0x08) + +bool kvm_arm_verify_ext_dabt_pending(CPUState *cs) +{ + uint32_t dfsr_val; + + if (!kvm_get_one_reg(cs, ARM_REG_DFSR, &dfsr_val)) { + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint32_t ttbcr; + int lpae = 0; + + if (!kvm_get_one_reg(cs, ARM_REG_TTBCR, &ttbcr)) { + lpae = arm_feature(env, ARM_FEATURE_LPAE) && (ttbcr & TTBCR_EAE); + } + /* The verification is based on FS filed of the DFSR reg only*/ + return (DFSR_FSC(lpae, dfsr_val) == DFSC_EXTABT(lpae)); + } + return false; +} diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index 3dc494a..1169237 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -1493,3 +1493,52 @@ bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit) return false; } + +#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0) +#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2) + +/* + * ESR_EL1 + * ISS encoding + * AARCH64: DFSC, bits [5:0] + * AARCH32: + * TTBCR.EAE == 0 + * FS[4] - DFSR[10] + * FS[3:0] - DFSR[3:0] + * TTBCR.EAE == 1 + * FS, bits [5:0] + */ +#define ESR_DFSC(aarch64, lpae, v) \ + ((aarch64 || (lpae)) ? ((v) & 0x3F) \ + : (((v) >> 6) | ((v) & 0x1F))) + +#define ESR_DFSC_EXTABT(aarch64, lpae) \ + ((aarch64) ? 0x10 : (lpae) ? 
0x10 : 0x8) + +bool kvm_arm_verify_ext_dabt_pending(CPUState *cs) +{ + uint64_t dfsr_val; + + if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) { + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64); + int lpae = 0; + + if (!aarch64_mode) { + uint64_t ttbcr; + + if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) { + lpae = arm_feature(env, ARM_FEATURE_LPAE) + && (ttbcr & TTBCR_EAE); + } + } + /* + * The verification here is based on the DFSC bits + * of the ESR_EL1 reg only + */ + return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) == + ESR_DFSC_EXTABT(aarch64_mode, lpae)); + } + return false; +} diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index a4ce4fd..adb3851 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -450,6 +450,16 @@ struct kvm_guest_debug_arch; void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr); /** + * kvm_arm_verify_ext_dabt_pending: + * @cs: CPUState + * + * Verify the fault status code wrt the Ext DABT injection + * + * Returns: true if the fault status code is as expected, false otherwise + */ +bool kvm_arm_verify_ext_dabt_pending(CPUState *cs); + +/** * its_class_name: * * Return the ITS class name to use depending on whether KVM acceleration diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 73d753f..8c07649 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -461,6 +461,12 @@ TCGv_i64 new_tmp_a64(DisasContext *s) return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64(); } +TCGv_i64 new_tmp_a64_local(DisasContext *s) +{ + assert(s->tmp_a64_count < TMP_A64_MAX); + return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_local_new_i64(); +} + TCGv_i64 new_tmp_a64_zero(DisasContext *s) { TCGv_i64 t = new_tmp_a64(s); diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h index 49e4865..647f0c7 100644 --- a/target/arm/translate-a64.h +++ b/target/arm/translate-a64.h @@ -30,6 +30,7 @@ void unallocated_encoding(DisasContext *s); } while (0) TCGv_i64 new_tmp_a64(DisasContext *s); +TCGv_i64 new_tmp_a64_local(DisasContext *s); TCGv_i64 new_tmp_a64_zero(DisasContext *s); TCGv_i64 cpu_reg(DisasContext *s, int reg); TCGv_i64 cpu_reg_sp(DisasContext *s, int reg); diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c index f318ca2..08f0fd1 100644 --- a/target/arm/translate-sve.c +++ b/target/arm/translate-sve.c @@ -4372,9 +4372,8 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) /* Copy the clean address into a local temp, live across the loop. */ t0 = clean_addr; - clean_addr = tcg_temp_local_new_i64(); + clean_addr = new_tmp_a64_local(s); tcg_gen_mov_i64(clean_addr, t0); - tcg_temp_free_i64(t0); gen_set_label(loop); @@ -4422,7 +4421,6 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) tcg_gen_st_i64(t0, cpu_env, vofs + len_align); tcg_temp_free_i64(t0); } - tcg_temp_free_i64(clean_addr); } /* Similarly for stores. */ @@ -4463,9 +4461,8 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) /* Copy the clean address into a local temp, live across the loop. 
*/ t0 = clean_addr; - clean_addr = tcg_temp_local_new_i64(); + clean_addr = new_tmp_a64_local(s); tcg_gen_mov_i64(clean_addr, t0); - tcg_temp_free_i64(t0); gen_set_label(loop); @@ -4509,7 +4506,6 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) } tcg_temp_free_i64(t0); } - tcg_temp_free_i64(clean_addr); } static bool trans_LDR_zri(DisasContext *s, arg_rri *a) diff --git a/target/riscv/Makefile.objs b/target/riscv/Makefile.objs index ff651f6..ff38df6 100644 --- a/target/riscv/Makefile.objs +++ b/target/riscv/Makefile.objs @@ -1,4 +1,4 @@ -obj-y += translate.o op_helper.o cpu_helper.o cpu.o csr.o fpu_helper.o gdbstub.o +obj-y += translate.o op_helper.o cpu_helper.o cpu.o csr.o fpu_helper.o vector_helper.o gdbstub.o obj-$(CONFIG_SOFTMMU) += pmp.o ifeq ($(CONFIG_SOFTMMU),y) diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 391a0b9..228b9bd 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -106,6 +106,11 @@ static void set_priv_version(CPURISCVState *env, int priv_ver) env->priv_ver = priv_ver; } +static void set_vext_version(CPURISCVState *env, int vext_ver) +{ + env->vext_ver = vext_ver; +} + static void set_feature(CPURISCVState *env, int feature) { env->features |= (1ULL << feature); @@ -334,6 +339,7 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) CPURISCVState *env = &cpu->env; RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); int priv_version = PRIV_VERSION_1_11_0; + int vext_version = VEXT_VERSION_0_07_1; target_ulong target_misa = 0; Error *local_err = NULL; @@ -357,6 +363,7 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } set_priv_version(env, priv_version); + set_vext_version(env, vext_version); if (cpu->cfg.mmu) { set_feature(env, RISCV_FEATURE_MMU); @@ -423,6 +430,45 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) if (cpu->cfg.ext_h) { target_misa |= RVH; } + if (cpu->cfg.ext_v) { + target_misa |= RVV; + if (!is_power_of_2(cpu->cfg.vlen)) { + error_setg(errp, + "Vector extension VLEN must be power of 2"); + return; + } + if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) { + error_setg(errp, + "Vector extension implementation only supports VLEN " + "in the range [128, %d]", RV_VLEN_MAX); + return; + } + if (!is_power_of_2(cpu->cfg.elen)) { + error_setg(errp, + "Vector extension ELEN must be power of 2"); + return; + } + if (cpu->cfg.elen > 64 || cpu->cfg.vlen < 8) { + error_setg(errp, + "Vector extension implementation only supports ELEN " + "in the range [8, 64]"); + return; + } + if (cpu->cfg.vext_spec) { + if (!g_strcmp0(cpu->cfg.vext_spec, "v0.7.1")) { + vext_version = VEXT_VERSION_0_07_1; + } else { + error_setg(errp, + "Unsupported vector spec version '%s'", + cpu->cfg.vext_spec); + return; + } + } else { + qemu_log("vector verison is not specified, " + "use the default value v0.7.1\n"); + } + set_vext_version(env, vext_version); + } set_misa(env, RVXLEN | target_misa); } @@ -462,10 +508,14 @@ static Property riscv_cpu_properties[] = { DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true), /* This is experimental so mark with 'x-' */ DEFINE_PROP_BOOL("x-h", RISCVCPU, cfg.ext_h, false), + DEFINE_PROP_BOOL("x-v", RISCVCPU, cfg.ext_v, false), DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true), DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true), DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true), DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), + DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), + DEFINE_PROP_UINT16("vlen", RISCVCPU, 
cfg.vlen, 128), + DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), DEFINE_PROP_END_OF_LIST(), diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 80569f0..eef20ca 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -21,6 +21,7 @@ #define RISCV_CPU_H #include "hw/core/cpu.h" +#include "hw/registerfields.h" #include "exec/cpu-defs.h" #include "fpu/softfloat-types.h" @@ -59,6 +60,7 @@ #define RVA RV('A') #define RVF RV('F') #define RVD RV('D') +#define RVV RV('V') #define RVC RV('C') #define RVS RV('S') #define RVU RV('U') @@ -77,6 +79,8 @@ enum { #define PRIV_VERSION_1_10_0 0x00011000 #define PRIV_VERSION_1_11_0 0x00011100 +#define VEXT_VERSION_0_07_1 0x00000701 + #define TRANSLATE_PMP_FAIL 2 #define TRANSLATE_FAIL 1 #define TRANSLATE_SUCCESS 0 @@ -88,9 +92,26 @@ typedef struct CPURISCVState CPURISCVState; #include "pmp.h" +#define RV_VLEN_MAX 256 + +FIELD(VTYPE, VLMUL, 0, 2) +FIELD(VTYPE, VSEW, 2, 3) +FIELD(VTYPE, VEDIV, 5, 2) +FIELD(VTYPE, RESERVED, 7, sizeof(target_ulong) * 8 - 9) +FIELD(VTYPE, VILL, sizeof(target_ulong) * 8 - 2, 1) + struct CPURISCVState { target_ulong gpr[32]; uint64_t fpr[32]; /* assume both F and D extensions */ + + /* vector coprocessor state. */ + uint64_t vreg[32 * RV_VLEN_MAX / 64] QEMU_ALIGNED(16); + target_ulong vxrm; + target_ulong vxsat; + target_ulong vl; + target_ulong vstart; + target_ulong vtype; + target_ulong pc; target_ulong load_res; target_ulong load_val; @@ -101,6 +122,7 @@ struct CPURISCVState { target_ulong guest_phys_fault_addr; target_ulong priv_ver; + target_ulong vext_ver; target_ulong misa; target_ulong misa_mask; @@ -257,12 +279,16 @@ typedef struct RISCVCPU { bool ext_s; bool ext_u; bool ext_h; + bool ext_v; bool ext_counters; bool ext_ifencei; bool ext_icsr; char *priv_spec; char *user_spec; + char *vext_spec; + uint16_t vlen; + uint16_t elen; bool mmu; bool pmp; } cfg; @@ -335,19 +361,62 @@ void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong); #define TB_FLAGS_MMU_MASK 3 #define TB_FLAGS_MSTATUS_FS MSTATUS_FS +typedef CPURISCVState CPUArchState; +typedef RISCVCPU ArchCPU; +#include "exec/cpu-all.h" + +FIELD(TB_FLAGS, VL_EQ_VLMAX, 2, 1) +FIELD(TB_FLAGS, LMUL, 3, 2) +FIELD(TB_FLAGS, SEW, 5, 3) +FIELD(TB_FLAGS, VILL, 8, 1) + +/* + * A simplification for VLMAX + * = (1 << LMUL) * VLEN / (8 * (1 << SEW)) + * = (VLEN << LMUL) / (8 << SEW) + * = (VLEN << LMUL) >> (SEW + 3) + * = VLEN >> (SEW + 3 - LMUL) + */ +static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype) +{ + uint8_t sew, lmul; + + sew = FIELD_EX64(vtype, VTYPE, VSEW); + lmul = FIELD_EX64(vtype, VTYPE, VLMUL); + return cpu->cfg.vlen >> (sew + 3 - lmul); +} + static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, - target_ulong *cs_base, uint32_t *flags) + target_ulong *cs_base, uint32_t *pflags) { + uint32_t flags = 0; + *pc = env->pc; *cs_base = 0; + + if (riscv_has_ext(env, RVV)) { + uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype); + bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl); + flags = FIELD_DP32(flags, TB_FLAGS, VILL, + FIELD_EX64(env->vtype, VTYPE, VILL)); + flags = FIELD_DP32(flags, TB_FLAGS, SEW, + FIELD_EX64(env->vtype, VTYPE, VSEW)); + flags = FIELD_DP32(flags, TB_FLAGS, LMUL, + FIELD_EX64(env->vtype, VTYPE, VLMUL)); + flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); + } else { + flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); + } + #ifdef CONFIG_USER_ONLY - *flags = 
TB_FLAGS_MSTATUS_FS; + flags |= TB_FLAGS_MSTATUS_FS; #else - *flags = cpu_mmu_index(env, 0); + flags |= cpu_mmu_index(env, 0); if (riscv_cpu_fp_enabled(env)) { - *flags |= env->mstatus & MSTATUS_FS; + flags |= env->mstatus & MSTATUS_FS; } #endif + *pflags = flags; } int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, @@ -388,9 +457,4 @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops); void riscv_cpu_register_gdb_regs_for_features(CPUState *cs); -typedef CPURISCVState CPUArchState; -typedef RISCVCPU ArchCPU; - -#include "exec/cpu-all.h" - #endif /* RISCV_CPU_H */ diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index 7f64ee1..8117e8b 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -29,6 +29,14 @@ #define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT) #define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA) +/* Vector Fixed-Point round model */ +#define FSR_VXRM_SHIFT 9 +#define FSR_VXRM (0x3 << FSR_VXRM_SHIFT) + +/* Vector Fixed-Point saturation flag */ +#define FSR_VXSAT_SHIFT 8 +#define FSR_VXSAT (0x1 << FSR_VXSAT_SHIFT) + /* Control and Status Registers */ /* User Trap Setup */ @@ -48,6 +56,13 @@ #define CSR_FRM 0x002 #define CSR_FCSR 0x003 +/* User Vector CSRs */ +#define CSR_VSTART 0x008 +#define CSR_VXSAT 0x009 +#define CSR_VXRM 0x00a +#define CSR_VL 0xc20 +#define CSR_VTYPE 0xc21 + /* User Timers and Counters */ #define CSR_CYCLE 0xc00 #define CSR_TIME 0xc01 diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 383be0a..ac01c83 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -46,6 +46,10 @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops) static int fs(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) + /* loose check condition for fcsr in vector extension */ + if ((csrno == CSR_FCSR) && (env->misa & RVV)) { + return 0; + } if (!env->debugger && !riscv_cpu_fp_enabled(env)) { return -1; } @@ -53,6 +57,14 @@ static int fs(CPURISCVState *env, int csrno) return 0; } +static int vs(CPURISCVState *env, int csrno) +{ + if (env->misa & RVV) { + return 0; + } + return -1; +} + static int ctr(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) @@ -154,6 +166,10 @@ static int read_fcsr(CPURISCVState *env, int csrno, target_ulong *val) #endif *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT) | (env->frm << FSR_RD_SHIFT); + if (vs(env, csrno) >= 0) { + *val |= (env->vxrm << FSR_VXRM_SHIFT) + | (env->vxsat << FSR_VXSAT_SHIFT); + } return 0; } @@ -166,10 +182,62 @@ static int write_fcsr(CPURISCVState *env, int csrno, target_ulong val) env->mstatus |= MSTATUS_FS; #endif env->frm = (val & FSR_RD) >> FSR_RD_SHIFT; + if (vs(env, csrno) >= 0) { + env->vxrm = (val & FSR_VXRM) >> FSR_VXRM_SHIFT; + env->vxsat = (val & FSR_VXSAT) >> FSR_VXSAT_SHIFT; + } riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT); return 0; } +static int read_vtype(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->vtype; + return 0; +} + +static int read_vl(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->vl; + return 0; +} + +static int read_vxrm(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->vxrm; + return 0; +} + +static int write_vxrm(CPURISCVState *env, int csrno, target_ulong val) +{ + env->vxrm = val; + return 0; +} + +static int read_vxsat(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->vxsat; + return 0; +} + +static int write_vxsat(CPURISCVState *env, int csrno, target_ulong val) +{ + env->vxsat = val; + return 0; 
+} + +static int read_vstart(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->vstart; + return 0; +} + +static int write_vstart(CPURISCVState *env, int csrno, target_ulong val) +{ + env->vstart = val; + return 0; +} + /* User Timers and Counters */ static int read_instret(CPURISCVState *env, int csrno, target_ulong *val) { @@ -1183,7 +1251,12 @@ static riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_FFLAGS] = { fs, read_fflags, write_fflags }, [CSR_FRM] = { fs, read_frm, write_frm }, [CSR_FCSR] = { fs, read_fcsr, write_fcsr }, - + /* Vector CSRs */ + [CSR_VSTART] = { vs, read_vstart, write_vstart }, + [CSR_VXSAT] = { vs, read_vxsat, write_vxsat }, + [CSR_VXRM] = { vs, read_vxrm, write_vxrm }, + [CSR_VL] = { vs, read_vl }, + [CSR_VTYPE] = { vs, read_vtype }, /* User Timers and Counters */ [CSR_CYCLE] = { ctr, read_instret }, [CSR_INSTRET] = { ctr, read_instret }, diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c index 0b79562..4379756 100644 --- a/target/riscv/fpu_helper.c +++ b/target/riscv/fpu_helper.c @@ -22,6 +22,7 @@ #include "exec/exec-all.h" #include "exec/helper-proto.h" #include "fpu/softfloat.h" +#include "internals.h" target_ulong riscv_cpu_get_fflags(CPURISCVState *env) { @@ -230,21 +231,7 @@ uint64_t helper_fcvt_s_lu(CPURISCVState *env, uint64_t rs1) target_ulong helper_fclass_s(uint64_t frs1) { - float32 f = frs1; - bool sign = float32_is_neg(f); - - if (float32_is_infinity(f)) { - return sign ? 1 << 0 : 1 << 7; - } else if (float32_is_zero(f)) { - return sign ? 1 << 3 : 1 << 4; - } else if (float32_is_zero_or_denormal(f)) { - return sign ? 1 << 2 : 1 << 5; - } else if (float32_is_any_nan(f)) { - float_status s = { }; /* for snan_bit_is_one */ - return float32_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8; - } else { - return sign ? 1 << 1 : 1 << 6; - } + return fclass_s(frs1); } uint64_t helper_fadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) @@ -353,19 +340,5 @@ uint64_t helper_fcvt_d_lu(CPURISCVState *env, uint64_t rs1) target_ulong helper_fclass_d(uint64_t frs1) { - float64 f = frs1; - bool sign = float64_is_neg(f); - - if (float64_is_infinity(f)) { - return sign ? 1 << 0 : 1 << 7; - } else if (float64_is_zero(f)) { - return sign ? 1 << 3 : 1 << 4; - } else if (float64_is_zero_or_denormal(f)) { - return sign ? 1 << 2 : 1 << 5; - } else if (float64_is_any_nan(f)) { - float_status s = { }; /* for snan_bit_is_one */ - return float64_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8; - } else { - return sign ? 
1 << 1 : 1 << 6; - } + return fclass_d(frs1); } diff --git a/target/riscv/helper.h b/target/riscv/helper.h index b36be97..acc2982 100644 --- a/target/riscv/helper.h +++ b/target/riscv/helper.h @@ -81,3 +81,1072 @@ DEF_HELPER_1(tlb_flush, void, env) #ifndef CONFIG_USER_ONLY DEF_HELPER_1(hyp_tlb_flush, void, env) #endif + +/* Vector functions */ +DEF_HELPER_3(vsetvl, tl, env, tl, tl) +DEF_HELPER_5(vlb_v_b, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlb_v_b_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlb_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlb_v_h_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlb_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlb_v_w_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlb_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlb_v_d_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlh_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlh_v_h_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlh_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlh_v_w_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlh_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlh_v_d_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlw_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlw_v_w_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlw_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlw_v_d_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle_v_b, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle_v_b_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle_v_h_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle_v_w_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vle_v_d_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbu_v_b, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbu_v_b_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbu_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbu_v_h_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbu_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbu_v_w_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbu_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbu_v_d_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlhu_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlhu_v_h_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlhu_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlhu_v_w_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlhu_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlhu_v_d_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlwu_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlwu_v_w_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlwu_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlwu_v_d_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsb_v_b, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsb_v_b_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsb_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsb_v_h_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsb_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsb_v_w_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsb_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsb_v_d_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsh_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsh_v_h_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsh_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsh_v_w_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsh_v_d, void, ptr, ptr, tl, 
env, i32) +DEF_HELPER_5(vsh_v_d_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsw_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsw_v_w_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsw_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vsw_v_d_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse_v_b, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse_v_b_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse_v_h_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse_v_w_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vse_v_d_mask, void, ptr, ptr, tl, env, i32) +DEF_HELPER_6(vlsb_v_b, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlsb_v_h, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlsb_v_w, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlsb_v_d, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlsh_v_h, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlsh_v_w, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlsh_v_d, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlsw_v_w, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlsw_v_d, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlse_v_b, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlse_v_h, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlse_v_w, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlse_v_d, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlsbu_v_b, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlsbu_v_h, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlsbu_v_w, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlsbu_v_d, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlshu_v_h, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlshu_v_w, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlshu_v_d, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlswu_v_w, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlswu_v_d, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vssb_v_b, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vssb_v_h, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vssb_v_w, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vssb_v_d, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vssh_v_h, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vssh_v_w, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vssh_v_d, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vssw_v_w, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vssw_v_d, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vsse_v_b, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vsse_v_h, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vsse_v_w, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vsse_v_d, void, ptr, ptr, tl, tl, env, i32) +DEF_HELPER_6(vlxb_v_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxb_v_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxb_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxb_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxh_v_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxh_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxh_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxw_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxw_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxe_v_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxe_v_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxe_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxe_v_d, void, ptr, ptr, tl, ptr, env, i32) 
+DEF_HELPER_6(vlxbu_v_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxbu_v_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxbu_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxbu_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxhu_v_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxhu_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxhu_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxwu_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vlxwu_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxb_v_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxb_v_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxb_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxb_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxh_v_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxh_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxh_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxw_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxw_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxe_v_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxe_v_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxe_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsxe_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_5(vlbff_v_b, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbff_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbff_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbff_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlhff_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlhff_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlhff_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlwff_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlwff_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vleff_v_b, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vleff_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vleff_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vleff_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbuff_v_b, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbuff_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbuff_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlbuff_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlhuff_v_h, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlhuff_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlhuff_v_d, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlwuff_v_w, void, ptr, ptr, tl, env, i32) +DEF_HELPER_5(vlwuff_v_d, void, ptr, ptr, tl, env, i32) +#ifdef TARGET_RISCV64 +DEF_HELPER_6(vamoswapw_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoswapd_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoaddw_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoaddd_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoxorw_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoxord_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoandw_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoandd_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoorw_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoord_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamominw_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamomind_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamomaxw_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamomaxd_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamominuw_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamominud_v_d, void, ptr, ptr, tl, ptr, env, 
i32) +DEF_HELPER_6(vamomaxuw_v_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamomaxud_v_d, void, ptr, ptr, tl, ptr, env, i32) +#endif +DEF_HELPER_6(vamoswapw_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoaddw_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoxorw_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoandw_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamoorw_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamominw_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamomaxw_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamominuw_v_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vamomaxuw_v_w, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsub_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vadd_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vadd_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vadd_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vadd_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsub_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsub_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsub_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsub_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vrsub_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vrsub_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vrsub_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vrsub_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_FLAGS_4(vec_rsubs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vec_rsubs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vec_rsubs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) +DEF_HELPER_FLAGS_4(vec_rsubs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32) + +DEF_HELPER_6(vwaddu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwaddu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwaddu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsubu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsubu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsubu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsub_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwaddu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwaddu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwaddu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsubu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsubu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsubu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwadd_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwadd_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwadd_vx_w, void, ptr, ptr, tl, ptr, env, i32) 
+DEF_HELPER_6(vwsub_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsub_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsub_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwaddu_wv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwaddu_wv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwaddu_wv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsubu_wv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsubu_wv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsubu_wv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwadd_wv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwadd_wv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwadd_wv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsub_wv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsub_wv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsub_wv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwaddu_wx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwaddu_wx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwaddu_wx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsubu_wx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsubu_wx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsubu_wx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwadd_wx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwadd_wx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwadd_wx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsub_wx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsub_wx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsub_wx_w, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vadc_vvm_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vadc_vvm_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vadc_vvm_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vadc_vvm_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsbc_vvm_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsbc_vvm_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsbc_vvm_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsbc_vvm_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmadc_vvm_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmadc_vvm_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmadc_vvm_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmadc_vvm_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsbc_vvm_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsbc_vvm_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsbc_vvm_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsbc_vvm_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vadc_vxm_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vadc_vxm_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vadc_vxm_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vadc_vxm_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsbc_vxm_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsbc_vxm_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsbc_vxm_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsbc_vxm_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmadc_vxm_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmadc_vxm_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmadc_vxm_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmadc_vxm_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsbc_vxm_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsbc_vxm_h, void, ptr, ptr, tl, ptr, env, i32) 
+DEF_HELPER_6(vmsbc_vxm_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsbc_vxm_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vand_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vand_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vand_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vand_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vor_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vor_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vor_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vor_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vxor_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vxor_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vxor_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vxor_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vand_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vand_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vand_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vand_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vor_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vor_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vor_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vor_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vxor_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vxor_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vxor_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vxor_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vsll_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsll_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsll_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsll_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsrl_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsra_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsra_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsra_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsra_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsll_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsll_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsll_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsll_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsrl_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsrl_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsrl_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsrl_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsra_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsra_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsra_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsra_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vnsrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnsrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnsrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnsra_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnsra_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnsra_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnsrl_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnsrl_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnsrl_vx_w, void, 
ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnsra_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnsra_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnsra_vx_w, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vmseq_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmseq_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmseq_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmseq_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsne_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsne_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsne_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsne_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsltu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsltu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsltu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsltu_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmslt_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmslt_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmslt_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmslt_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsleu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsleu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsleu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsleu_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsle_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsle_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsle_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmsle_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmseq_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmseq_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmseq_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmseq_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsne_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsne_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsne_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsne_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsltu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsltu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsltu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsltu_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmslt_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmslt_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmslt_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmslt_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsleu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsleu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsleu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsleu_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsle_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsle_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsle_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsle_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsgtu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsgtu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsgtu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsgtu_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsgt_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsgt_vx_h, void, ptr, ptr, tl, ptr, env, i32) 
+DEF_HELPER_6(vmsgt_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmsgt_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vminu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vminu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vminu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vminu_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmin_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmin_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmin_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmin_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmaxu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmaxu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmaxu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmaxu_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmax_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmax_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmax_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmax_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vminu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vminu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vminu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vminu_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmin_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmin_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmin_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmin_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmaxu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmaxu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmaxu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmaxu_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmax_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmax_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmax_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmax_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmul_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmulh_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmulh_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmulh_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmulh_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmulhu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmulhu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmulhu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmulhu_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmulhsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmulhsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmulhsu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmulhsu_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmul_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmul_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmul_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmul_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmulh_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmulh_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmulh_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmulh_vx_d, void, ptr, ptr, tl, ptr, 
env, i32) +DEF_HELPER_6(vmulhu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmulhu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmulhu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmulhu_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmulhsu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmulhsu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmulhsu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmulhsu_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vdivu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vdivu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vdivu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vdivu_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vdiv_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vdiv_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vdiv_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vdiv_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vremu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vremu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vremu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vremu_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vrem_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vrem_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vrem_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vrem_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vdivu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vdivu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vdivu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vdivu_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vdiv_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vdiv_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vdiv_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vdiv_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vremu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vremu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vremu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vremu_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vrem_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vrem_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vrem_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vrem_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vwmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmulu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmulu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmulu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmulsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmulsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmulsu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmul_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmul_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmul_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmulu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmulu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmulu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmulsu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmulsu_vx_h, void, ptr, ptr, tl, ptr, env, i32) 
+DEF_HELPER_6(vwmulsu_vx_w, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vmacc_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmacc_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnmsac_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnmsac_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnmsub_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnmsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnmsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnmsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmacc_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmacc_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmacc_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmacc_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnmsac_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnmsac_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnmsac_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnmsac_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmadd_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmadd_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmadd_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmadd_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnmsub_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnmsub_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnmsub_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnmsub_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vwmaccu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmaccu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmaccu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmacc_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmaccsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmaccsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmaccsu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwmaccu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmaccu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmaccu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmacc_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmacc_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmacc_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmaccsu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmaccsu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmaccsu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmaccus_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmaccus_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwmaccus_vx_w, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vmerge_vvm_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmerge_vvm_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmerge_vvm_w, void, ptr, 
ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmerge_vvm_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmerge_vxm_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmerge_vxm_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmerge_vxm_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vmerge_vxm_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_4(vmv_v_v_b, void, ptr, ptr, env, i32) +DEF_HELPER_4(vmv_v_v_h, void, ptr, ptr, env, i32) +DEF_HELPER_4(vmv_v_v_w, void, ptr, ptr, env, i32) +DEF_HELPER_4(vmv_v_v_d, void, ptr, ptr, env, i32) +DEF_HELPER_4(vmv_v_x_b, void, ptr, i64, env, i32) +DEF_HELPER_4(vmv_v_x_h, void, ptr, i64, env, i32) +DEF_HELPER_4(vmv_v_x_w, void, ptr, i64, env, i32) +DEF_HELPER_4(vmv_v_x_d, void, ptr, i64, env, i32) + +DEF_HELPER_6(vsaddu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsaddu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsaddu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsaddu_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssubu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssubu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssubu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssubu_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssub_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssub_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssub_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssub_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsaddu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsaddu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsaddu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsaddu_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsadd_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsadd_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsadd_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsadd_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssubu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssubu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssubu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssubu_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssub_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssub_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssub_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssub_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vaadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vaadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vaadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vaadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vasub_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vasub_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vasub_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vasub_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vaadd_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vaadd_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vaadd_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vaadd_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vasub_vx_b, void, ptr, ptr, tl, ptr, env, i32) 
+DEF_HELPER_6(vasub_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vasub_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vasub_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vsmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsmul_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vsmul_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsmul_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsmul_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vsmul_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vwsmaccu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsmaccu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsmaccu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsmacc_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsmaccsu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsmaccsu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsmaccsu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwsmaccu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsmaccu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsmaccu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsmacc_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsmacc_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsmacc_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsmaccsu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsmaccsu_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsmaccsu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsmaccus_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsmaccus_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vwsmaccus_vx_w, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vssrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssrl_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssra_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssra_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssra_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssra_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vssrl_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssrl_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssrl_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssrl_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssra_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssra_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssra_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vssra_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vnclip_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnclip_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnclip_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnclipu_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnclipu_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnclipu_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vnclipu_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnclipu_vx_h, void, ptr, ptr, tl, ptr, env, i32) 
+DEF_HELPER_6(vnclipu_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnclip_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnclip_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vnclip_vx_w, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vfadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfadd_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfadd_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfadd_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfsub_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfsub_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfsub_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfrsub_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfrsub_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfrsub_vf_d, void, ptr, ptr, i64, ptr, env, i32) + +DEF_HELPER_6(vfwadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwadd_wv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwadd_wv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwsub_wv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwsub_wv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwadd_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwadd_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwsub_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwsub_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwadd_wf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwadd_wf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwsub_wf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwsub_wf_w, void, ptr, ptr, i64, ptr, env, i32) + +DEF_HELPER_6(vfmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmul_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfdiv_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfdiv_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfdiv_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmul_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmul_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmul_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfdiv_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfdiv_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfdiv_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfrdiv_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfrdiv_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfrdiv_vf_d, void, ptr, ptr, i64, ptr, env, i32) + +DEF_HELPER_6(vfwmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwmul_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwmul_vf_w, void, ptr, ptr, i64, ptr, env, i32) + +DEF_HELPER_6(vfmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmacc_vv_d, void, ptr, ptr, 
ptr, ptr, env, i32) +DEF_HELPER_6(vfnmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfnmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfnmacc_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmsac_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfnmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfnmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfnmsac_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfnmadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfnmadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfnmadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfnmsub_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfnmsub_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfnmsub_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmacc_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmacc_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmacc_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfnmacc_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfnmacc_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfnmacc_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmsac_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmsac_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmsac_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfnmsac_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfnmsac_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfnmsac_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmadd_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmadd_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmadd_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfnmadd_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfnmadd_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfnmadd_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmsub_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmsub_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmsub_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfnmsub_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfnmsub_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfnmsub_vf_d, void, ptr, ptr, i64, ptr, env, i32) + +DEF_HELPER_6(vfwmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwnmacc_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwnmacc_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwnmsac_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwnmsac_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwmacc_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwmacc_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwnmacc_vf_h, 
void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwnmacc_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwmsac_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwmsac_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwnmsac_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfwnmsac_vf_w, void, ptr, ptr, i64, ptr, env, i32) + +DEF_HELPER_5(vfsqrt_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfsqrt_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfsqrt_v_d, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_6(vfmin_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmin_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmin_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmax_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmax_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmax_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfmin_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmin_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmin_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmax_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmax_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmax_vf_d, void, ptr, ptr, i64, ptr, env, i32) + +DEF_HELPER_6(vfsgnj_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfsgnj_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfsgnj_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfsgnjn_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfsgnjn_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfsgnjn_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfsgnjx_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfsgnjx_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfsgnjx_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfsgnj_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfsgnj_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfsgnj_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfsgnjn_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfsgnjn_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfsgnjn_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfsgnjx_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfsgnjx_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfsgnjx_vf_d, void, ptr, ptr, i64, ptr, env, i32) + +DEF_HELPER_6(vmfeq_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmfeq_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmfeq_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmfne_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmfne_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmfne_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmflt_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmflt_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmflt_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmfle_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmfle_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmfle_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmfeq_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfeq_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfeq_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfne_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfne_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfne_vf_d, void, ptr, ptr, i64, ptr, env, i32) 
+DEF_HELPER_6(vmflt_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmflt_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmflt_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfle_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfle_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfle_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfgt_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfgt_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfgt_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfge_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfge_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmfge_vf_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmford_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmford_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmford_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmford_vf_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmford_vf_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vmford_vf_d, void, ptr, ptr, i64, ptr, env, i32) + +DEF_HELPER_5(vfclass_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfclass_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfclass_v_d, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_6(vfmerge_vfm_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmerge_vfm_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vfmerge_vfm_d, void, ptr, ptr, i64, ptr, env, i32) + +DEF_HELPER_5(vfcvt_xu_f_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfcvt_xu_f_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfcvt_xu_f_v_d, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfcvt_x_f_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfcvt_x_f_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfcvt_x_f_v_d, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfcvt_f_xu_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfcvt_f_xu_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfcvt_f_xu_v_d, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfcvt_f_x_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfcvt_f_x_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfcvt_f_x_v_d, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_5(vfwcvt_xu_f_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfwcvt_xu_f_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfwcvt_x_f_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfwcvt_x_f_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfwcvt_f_xu_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfwcvt_f_xu_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfwcvt_f_x_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfwcvt_f_x_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfwcvt_f_f_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfwcvt_f_f_v_w, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_5(vfncvt_xu_f_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_xu_f_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_x_f_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_x_f_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_f_xu_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_f_xu_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_f_x_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_f_x_v_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_f_f_v_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vfncvt_f_f_v_w, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_6(vredsum_vs_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredsum_vs_h, void, 
ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredsum_vs_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredmaxu_vs_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredmaxu_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredmaxu_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredmaxu_vs_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredmax_vs_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredmax_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredmax_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredmax_vs_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredminu_vs_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredminu_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredminu_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredminu_vs_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredmin_vs_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredmin_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredmin_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredand_vs_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredand_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredand_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredand_vs_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredor_vs_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredor_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredor_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredor_vs_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredxor_vs_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredxor_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredxor_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vredxor_vs_d, void, ptr, ptr, ptr, ptr, env, i32) + +DEF_HELPER_6(vwredsumu_vs_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwredsumu_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwredsumu_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwredsum_vs_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32) + +DEF_HELPER_6(vfredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredsum_vs_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredmax_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredmax_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredmax_vs_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredmin_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredmin_vs_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfredmin_vs_d, void, ptr, ptr, ptr, ptr, env, i32) + +DEF_HELPER_6(vfwredsum_vs_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vfwredsum_vs_w, void, ptr, ptr, ptr, ptr, env, i32) + +DEF_HELPER_6(vmand_mm, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmnand_mm, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmandnot_mm, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmxor_mm, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmor_mm, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmnor_mm, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmornot_mm, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vmxnor_mm, void, ptr, ptr, ptr, ptr, env, 
i32) + +DEF_HELPER_4(vmpopc_m, tl, ptr, ptr, env, i32) + +DEF_HELPER_4(vmfirst_m, tl, ptr, ptr, env, i32) + +DEF_HELPER_5(vmsbf_m, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vmsif_m, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(vmsof_m, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_5(viota_m_b, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(viota_m_h, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(viota_m_w, void, ptr, ptr, ptr, env, i32) +DEF_HELPER_5(viota_m_d, void, ptr, ptr, ptr, env, i32) + +DEF_HELPER_4(vid_v_b, void, ptr, ptr, env, i32) +DEF_HELPER_4(vid_v_h, void, ptr, ptr, env, i32) +DEF_HELPER_4(vid_v_w, void, ptr, ptr, env, i32) +DEF_HELPER_4(vid_v_d, void, ptr, ptr, env, i32) + +DEF_HELPER_6(vslideup_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslideup_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslideup_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslideup_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslidedown_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslidedown_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslidedown_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslidedown_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslide1up_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslide1up_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslide1up_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslide1up_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslide1down_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vrgather_vv_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vrgather_vv_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vrgather_vv_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vrgather_vv_d, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vrgather_vx_b, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vrgather_vx_h, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vrgather_vx_w, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vrgather_vx_d, void, ptr, ptr, tl, ptr, env, i32) + +DEF_HELPER_6(vcompress_vm_b, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vcompress_vm_h, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vcompress_vm_w, void, ptr, ptr, ptr, ptr, env, i32) +DEF_HELPER_6(vcompress_vm_d, void, ptr, ptr, ptr, ptr, env, i32) diff --git a/target/riscv/insn32-64.decode b/target/riscv/insn32-64.decode index 380bf79..86153d9 100644 --- a/target/riscv/insn32-64.decode +++ b/target/riscv/insn32-64.decode @@ -57,6 +57,17 @@ amomax_d 10100 . . ..... ..... 011 ..... 0101111 @atom_st amominu_d 11000 . . ..... ..... 011 ..... 0101111 @atom_st amomaxu_d 11100 . . ..... ..... 011 ..... 0101111 @atom_st +#*** Vector AMO operations (in addition to Zvamo) *** +vamoswapd_v 00001 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamoaddd_v 00000 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamoxord_v 00100 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamoandd_v 01100 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamoord_v 01000 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamomind_v 10000 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamomaxd_v 10100 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamominud_v 11000 . . ..... ..... 111 ..... 0101111 @r_wdvm +vamomaxud_v 11100 . . ..... ..... 111 ..... 
0101111 @r_wdvm + # *** RV64F Standard Extension (in addition to RV32F) *** fcvt_l_s 1100000 00010 ..... ... ..... 1010011 @r2_rm fcvt_lu_s 1100000 00011 ..... ... ..... 1010011 @r2_rm diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index 4c8d121..bdd8563 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -25,6 +25,7 @@ %sh10 20:10 %csr 20:12 %rm 12:3 +%nf 29:3 !function=ex_plus_1 # immediates: %imm_i 20:s12 @@ -43,6 +44,11 @@ &u imm rd &shift shamt rs1 rd &atomic aq rl rs2 rs1 rd +&rmrr vm rd rs1 rs2 +&rmr vm rd rs2 +&rwdvm vm wd rd rs1 rs2 +&r2nfvm vm rd rs1 nf +&rnfvm vm rd rs1 rs2 nf # Formats 32: @r ....... ..... ..... ... ..... ....... &r %rs2 %rs1 %rd @@ -62,6 +68,16 @@ @r_rm ....... ..... ..... ... ..... ....... %rs2 %rs1 %rm %rd @r2_rm ....... ..... ..... ... ..... ....... %rs1 %rm %rd @r2 ....... ..... ..... ... ..... ....... %rs1 %rd +@r2_nfvm ... ... vm:1 ..... ..... ... ..... ....... &r2nfvm %nf %rs1 %rd +@r2_vm ...... vm:1 ..... ..... ... ..... ....... &rmr %rs2 %rd +@r1_vm ...... vm:1 ..... ..... ... ..... ....... %rd +@r_nfvm ... ... vm:1 ..... ..... ... ..... ....... &rnfvm %nf %rs2 %rs1 %rd +@r2rd ....... ..... ..... ... ..... ....... %rs2 %rd +@r_vm ...... vm:1 ..... ..... ... ..... ....... &rmrr %rs2 %rs1 %rd +@r_vm_1 ...... . ..... ..... ... ..... ....... &rmrr vm=1 %rs2 %rs1 %rd +@r_vm_0 ...... . ..... ..... ... ..... ....... &rmrr vm=0 %rs2 %rs1 %rd +@r_wdvm ..... wd:1 vm:1 ..... ..... ... ..... ....... &rwdvm %rs2 %rs1 %rd +@r2_zimm . zimm:11 ..... ... ..... ....... %rs1 %rd @hfence_gvma ....... ..... ..... ... ..... ....... %rs2 %rs1 @hfence_vvma ....... ..... ..... ... ..... ....... %rs2 %rs1 @@ -209,3 +225,359 @@ fcvt_d_wu 1101001 00001 ..... ... ..... 1010011 @r2_rm # *** RV32H Base Instruction Set *** hfence_gvma 0110001 ..... ..... 000 00000 1110011 @hfence_gvma hfence_vvma 0010001 ..... ..... 000 00000 1110011 @hfence_vvma + +# *** RV32V Extension *** + +# *** Vector loads and stores are encoded within LOADFP/STORE-FP *** +vlb_v ... 100 . 00000 ..... 000 ..... 0000111 @r2_nfvm +vlh_v ... 100 . 00000 ..... 101 ..... 0000111 @r2_nfvm +vlw_v ... 100 . 00000 ..... 110 ..... 0000111 @r2_nfvm +vle_v ... 000 . 00000 ..... 111 ..... 0000111 @r2_nfvm +vlbu_v ... 000 . 00000 ..... 000 ..... 0000111 @r2_nfvm +vlhu_v ... 000 . 00000 ..... 101 ..... 0000111 @r2_nfvm +vlwu_v ... 000 . 00000 ..... 110 ..... 0000111 @r2_nfvm +vlbff_v ... 100 . 10000 ..... 000 ..... 0000111 @r2_nfvm +vlhff_v ... 100 . 10000 ..... 101 ..... 0000111 @r2_nfvm +vlwff_v ... 100 . 10000 ..... 110 ..... 0000111 @r2_nfvm +vleff_v ... 000 . 10000 ..... 111 ..... 0000111 @r2_nfvm +vlbuff_v ... 000 . 10000 ..... 000 ..... 0000111 @r2_nfvm +vlhuff_v ... 000 . 10000 ..... 101 ..... 0000111 @r2_nfvm +vlwuff_v ... 000 . 10000 ..... 110 ..... 0000111 @r2_nfvm +vsb_v ... 000 . 00000 ..... 000 ..... 0100111 @r2_nfvm +vsh_v ... 000 . 00000 ..... 101 ..... 0100111 @r2_nfvm +vsw_v ... 000 . 00000 ..... 110 ..... 0100111 @r2_nfvm +vse_v ... 000 . 00000 ..... 111 ..... 0100111 @r2_nfvm + +vlsb_v ... 110 . ..... ..... 000 ..... 0000111 @r_nfvm +vlsh_v ... 110 . ..... ..... 101 ..... 0000111 @r_nfvm +vlsw_v ... 110 . ..... ..... 110 ..... 0000111 @r_nfvm +vlse_v ... 010 . ..... ..... 111 ..... 0000111 @r_nfvm +vlsbu_v ... 010 . ..... ..... 000 ..... 0000111 @r_nfvm +vlshu_v ... 010 . ..... ..... 101 ..... 0000111 @r_nfvm +vlswu_v ... 010 . ..... ..... 110 ..... 0000111 @r_nfvm +vssb_v ... 010 . ..... ..... 000 ..... 0100111 @r_nfvm +vssh_v ... 010 . ..... ..... 101 ..... 
0100111 @r_nfvm +vssw_v ... 010 . ..... ..... 110 ..... 0100111 @r_nfvm +vsse_v ... 010 . ..... ..... 111 ..... 0100111 @r_nfvm + +vlxb_v ... 111 . ..... ..... 000 ..... 0000111 @r_nfvm +vlxh_v ... 111 . ..... ..... 101 ..... 0000111 @r_nfvm +vlxw_v ... 111 . ..... ..... 110 ..... 0000111 @r_nfvm +vlxe_v ... 011 . ..... ..... 111 ..... 0000111 @r_nfvm +vlxbu_v ... 011 . ..... ..... 000 ..... 0000111 @r_nfvm +vlxhu_v ... 011 . ..... ..... 101 ..... 0000111 @r_nfvm +vlxwu_v ... 011 . ..... ..... 110 ..... 0000111 @r_nfvm +# Vector ordered-indexed and unordered-indexed store insns. +vsxb_v ... -11 . ..... ..... 000 ..... 0100111 @r_nfvm +vsxh_v ... -11 . ..... ..... 101 ..... 0100111 @r_nfvm +vsxw_v ... -11 . ..... ..... 110 ..... 0100111 @r_nfvm +vsxe_v ... -11 . ..... ..... 111 ..... 0100111 @r_nfvm + +#*** Vector AMO operations are encoded under the standard AMO major opcode *** +vamoswapw_v 00001 . . ..... ..... 110 ..... 0101111 @r_wdvm +vamoaddw_v 00000 . . ..... ..... 110 ..... 0101111 @r_wdvm +vamoxorw_v 00100 . . ..... ..... 110 ..... 0101111 @r_wdvm +vamoandw_v 01100 . . ..... ..... 110 ..... 0101111 @r_wdvm +vamoorw_v 01000 . . ..... ..... 110 ..... 0101111 @r_wdvm +vamominw_v 10000 . . ..... ..... 110 ..... 0101111 @r_wdvm +vamomaxw_v 10100 . . ..... ..... 110 ..... 0101111 @r_wdvm +vamominuw_v 11000 . . ..... ..... 110 ..... 0101111 @r_wdvm +vamomaxuw_v 11100 . . ..... ..... 110 ..... 0101111 @r_wdvm + +# *** new major opcode OP-V *** +vadd_vv 000000 . ..... ..... 000 ..... 1010111 @r_vm +vadd_vx 000000 . ..... ..... 100 ..... 1010111 @r_vm +vadd_vi 000000 . ..... ..... 011 ..... 1010111 @r_vm +vsub_vv 000010 . ..... ..... 000 ..... 1010111 @r_vm +vsub_vx 000010 . ..... ..... 100 ..... 1010111 @r_vm +vrsub_vx 000011 . ..... ..... 100 ..... 1010111 @r_vm +vrsub_vi 000011 . ..... ..... 011 ..... 1010111 @r_vm +vwaddu_vv 110000 . ..... ..... 010 ..... 1010111 @r_vm +vwaddu_vx 110000 . ..... ..... 110 ..... 1010111 @r_vm +vwadd_vv 110001 . ..... ..... 010 ..... 1010111 @r_vm +vwadd_vx 110001 . ..... ..... 110 ..... 1010111 @r_vm +vwsubu_vv 110010 . ..... ..... 010 ..... 1010111 @r_vm +vwsubu_vx 110010 . ..... ..... 110 ..... 1010111 @r_vm +vwsub_vv 110011 . ..... ..... 010 ..... 1010111 @r_vm +vwsub_vx 110011 . ..... ..... 110 ..... 1010111 @r_vm +vwaddu_wv 110100 . ..... ..... 010 ..... 1010111 @r_vm +vwaddu_wx 110100 . ..... ..... 110 ..... 1010111 @r_vm +vwadd_wv 110101 . ..... ..... 010 ..... 1010111 @r_vm +vwadd_wx 110101 . ..... ..... 110 ..... 1010111 @r_vm +vwsubu_wv 110110 . ..... ..... 010 ..... 1010111 @r_vm +vwsubu_wx 110110 . ..... ..... 110 ..... 1010111 @r_vm +vwsub_wv 110111 . ..... ..... 010 ..... 1010111 @r_vm +vwsub_wx 110111 . ..... ..... 110 ..... 1010111 @r_vm +vadc_vvm 010000 1 ..... ..... 000 ..... 1010111 @r_vm_1 +vadc_vxm 010000 1 ..... ..... 100 ..... 1010111 @r_vm_1 +vadc_vim 010000 1 ..... ..... 011 ..... 1010111 @r_vm_1 +vmadc_vvm 010001 1 ..... ..... 000 ..... 1010111 @r_vm_1 +vmadc_vxm 010001 1 ..... ..... 100 ..... 1010111 @r_vm_1 +vmadc_vim 010001 1 ..... ..... 011 ..... 1010111 @r_vm_1 +vsbc_vvm 010010 1 ..... ..... 000 ..... 1010111 @r_vm_1 +vsbc_vxm 010010 1 ..... ..... 100 ..... 1010111 @r_vm_1 +vmsbc_vvm 010011 1 ..... ..... 000 ..... 1010111 @r_vm_1 +vmsbc_vxm 010011 1 ..... ..... 100 ..... 1010111 @r_vm_1 +vand_vv 001001 . ..... ..... 000 ..... 1010111 @r_vm +vand_vx 001001 . ..... ..... 100 ..... 1010111 @r_vm +vand_vi 001001 . ..... ..... 011 ..... 1010111 @r_vm +vor_vv 001010 . ..... ..... 000 ..... 1010111 @r_vm +vor_vx 001010 . ..... 
..... 100 ..... 1010111 @r_vm +vor_vi 001010 . ..... ..... 011 ..... 1010111 @r_vm +vxor_vv 001011 . ..... ..... 000 ..... 1010111 @r_vm +vxor_vx 001011 . ..... ..... 100 ..... 1010111 @r_vm +vxor_vi 001011 . ..... ..... 011 ..... 1010111 @r_vm +vsll_vv 100101 . ..... ..... 000 ..... 1010111 @r_vm +vsll_vx 100101 . ..... ..... 100 ..... 1010111 @r_vm +vsll_vi 100101 . ..... ..... 011 ..... 1010111 @r_vm +vsrl_vv 101000 . ..... ..... 000 ..... 1010111 @r_vm +vsrl_vx 101000 . ..... ..... 100 ..... 1010111 @r_vm +vsrl_vi 101000 . ..... ..... 011 ..... 1010111 @r_vm +vsra_vv 101001 . ..... ..... 000 ..... 1010111 @r_vm +vsra_vx 101001 . ..... ..... 100 ..... 1010111 @r_vm +vsra_vi 101001 . ..... ..... 011 ..... 1010111 @r_vm +vnsrl_vv 101100 . ..... ..... 000 ..... 1010111 @r_vm +vnsrl_vx 101100 . ..... ..... 100 ..... 1010111 @r_vm +vnsrl_vi 101100 . ..... ..... 011 ..... 1010111 @r_vm +vnsra_vv 101101 . ..... ..... 000 ..... 1010111 @r_vm +vnsra_vx 101101 . ..... ..... 100 ..... 1010111 @r_vm +vnsra_vi 101101 . ..... ..... 011 ..... 1010111 @r_vm +vmseq_vv 011000 . ..... ..... 000 ..... 1010111 @r_vm +vmseq_vx 011000 . ..... ..... 100 ..... 1010111 @r_vm +vmseq_vi 011000 . ..... ..... 011 ..... 1010111 @r_vm +vmsne_vv 011001 . ..... ..... 000 ..... 1010111 @r_vm +vmsne_vx 011001 . ..... ..... 100 ..... 1010111 @r_vm +vmsne_vi 011001 . ..... ..... 011 ..... 1010111 @r_vm +vmsltu_vv 011010 . ..... ..... 000 ..... 1010111 @r_vm +vmsltu_vx 011010 . ..... ..... 100 ..... 1010111 @r_vm +vmslt_vv 011011 . ..... ..... 000 ..... 1010111 @r_vm +vmslt_vx 011011 . ..... ..... 100 ..... 1010111 @r_vm +vmsleu_vv 011100 . ..... ..... 000 ..... 1010111 @r_vm +vmsleu_vx 011100 . ..... ..... 100 ..... 1010111 @r_vm +vmsleu_vi 011100 . ..... ..... 011 ..... 1010111 @r_vm +vmsle_vv 011101 . ..... ..... 000 ..... 1010111 @r_vm +vmsle_vx 011101 . ..... ..... 100 ..... 1010111 @r_vm +vmsle_vi 011101 . ..... ..... 011 ..... 1010111 @r_vm +vmsgtu_vx 011110 . ..... ..... 100 ..... 1010111 @r_vm +vmsgtu_vi 011110 . ..... ..... 011 ..... 1010111 @r_vm +vmsgt_vx 011111 . ..... ..... 100 ..... 1010111 @r_vm +vmsgt_vi 011111 . ..... ..... 011 ..... 1010111 @r_vm +vminu_vv 000100 . ..... ..... 000 ..... 1010111 @r_vm +vminu_vx 000100 . ..... ..... 100 ..... 1010111 @r_vm +vmin_vv 000101 . ..... ..... 000 ..... 1010111 @r_vm +vmin_vx 000101 . ..... ..... 100 ..... 1010111 @r_vm +vmaxu_vv 000110 . ..... ..... 000 ..... 1010111 @r_vm +vmaxu_vx 000110 . ..... ..... 100 ..... 1010111 @r_vm +vmax_vv 000111 . ..... ..... 000 ..... 1010111 @r_vm +vmax_vx 000111 . ..... ..... 100 ..... 1010111 @r_vm +vmul_vv 100101 . ..... ..... 010 ..... 1010111 @r_vm +vmul_vx 100101 . ..... ..... 110 ..... 1010111 @r_vm +vmulh_vv 100111 . ..... ..... 010 ..... 1010111 @r_vm +vmulh_vx 100111 . ..... ..... 110 ..... 1010111 @r_vm +vmulhu_vv 100100 . ..... ..... 010 ..... 1010111 @r_vm +vmulhu_vx 100100 . ..... ..... 110 ..... 1010111 @r_vm +vmulhsu_vv 100110 . ..... ..... 010 ..... 1010111 @r_vm +vmulhsu_vx 100110 . ..... ..... 110 ..... 1010111 @r_vm +vdivu_vv 100000 . ..... ..... 010 ..... 1010111 @r_vm +vdivu_vx 100000 . ..... ..... 110 ..... 1010111 @r_vm +vdiv_vv 100001 . ..... ..... 010 ..... 1010111 @r_vm +vdiv_vx 100001 . ..... ..... 110 ..... 1010111 @r_vm +vremu_vv 100010 . ..... ..... 010 ..... 1010111 @r_vm +vremu_vx 100010 . ..... ..... 110 ..... 1010111 @r_vm +vrem_vv 100011 . ..... ..... 010 ..... 1010111 @r_vm +vrem_vx 100011 . ..... ..... 110 ..... 1010111 @r_vm +vwmulu_vv 111000 . ..... ..... 010 ..... 
1010111 @r_vm +vwmulu_vx 111000 . ..... ..... 110 ..... 1010111 @r_vm +vwmulsu_vv 111010 . ..... ..... 010 ..... 1010111 @r_vm +vwmulsu_vx 111010 . ..... ..... 110 ..... 1010111 @r_vm +vwmul_vv 111011 . ..... ..... 010 ..... 1010111 @r_vm +vwmul_vx 111011 . ..... ..... 110 ..... 1010111 @r_vm +vmacc_vv 101101 . ..... ..... 010 ..... 1010111 @r_vm +vmacc_vx 101101 . ..... ..... 110 ..... 1010111 @r_vm +vnmsac_vv 101111 . ..... ..... 010 ..... 1010111 @r_vm +vnmsac_vx 101111 . ..... ..... 110 ..... 1010111 @r_vm +vmadd_vv 101001 . ..... ..... 010 ..... 1010111 @r_vm +vmadd_vx 101001 . ..... ..... 110 ..... 1010111 @r_vm +vnmsub_vv 101011 . ..... ..... 010 ..... 1010111 @r_vm +vnmsub_vx 101011 . ..... ..... 110 ..... 1010111 @r_vm +vwmaccu_vv 111100 . ..... ..... 010 ..... 1010111 @r_vm +vwmaccu_vx 111100 . ..... ..... 110 ..... 1010111 @r_vm +vwmacc_vv 111101 . ..... ..... 010 ..... 1010111 @r_vm +vwmacc_vx 111101 . ..... ..... 110 ..... 1010111 @r_vm +vwmaccsu_vv 111110 . ..... ..... 010 ..... 1010111 @r_vm +vwmaccsu_vx 111110 . ..... ..... 110 ..... 1010111 @r_vm +vwmaccus_vx 111111 . ..... ..... 110 ..... 1010111 @r_vm +vmv_v_v 010111 1 00000 ..... 000 ..... 1010111 @r2 +vmv_v_x 010111 1 00000 ..... 100 ..... 1010111 @r2 +vmv_v_i 010111 1 00000 ..... 011 ..... 1010111 @r2 +vmerge_vvm 010111 0 ..... ..... 000 ..... 1010111 @r_vm_0 +vmerge_vxm 010111 0 ..... ..... 100 ..... 1010111 @r_vm_0 +vmerge_vim 010111 0 ..... ..... 011 ..... 1010111 @r_vm_0 +vsaddu_vv 100000 . ..... ..... 000 ..... 1010111 @r_vm +vsaddu_vx 100000 . ..... ..... 100 ..... 1010111 @r_vm +vsaddu_vi 100000 . ..... ..... 011 ..... 1010111 @r_vm +vsadd_vv 100001 . ..... ..... 000 ..... 1010111 @r_vm +vsadd_vx 100001 . ..... ..... 100 ..... 1010111 @r_vm +vsadd_vi 100001 . ..... ..... 011 ..... 1010111 @r_vm +vssubu_vv 100010 . ..... ..... 000 ..... 1010111 @r_vm +vssubu_vx 100010 . ..... ..... 100 ..... 1010111 @r_vm +vssub_vv 100011 . ..... ..... 000 ..... 1010111 @r_vm +vssub_vx 100011 . ..... ..... 100 ..... 1010111 @r_vm +vaadd_vv 100100 . ..... ..... 000 ..... 1010111 @r_vm +vaadd_vx 100100 . ..... ..... 100 ..... 1010111 @r_vm +vaadd_vi 100100 . ..... ..... 011 ..... 1010111 @r_vm +vasub_vv 100110 . ..... ..... 000 ..... 1010111 @r_vm +vasub_vx 100110 . ..... ..... 100 ..... 1010111 @r_vm +vsmul_vv 100111 . ..... ..... 000 ..... 1010111 @r_vm +vsmul_vx 100111 . ..... ..... 100 ..... 1010111 @r_vm +vwsmaccu_vv 111100 . ..... ..... 000 ..... 1010111 @r_vm +vwsmaccu_vx 111100 . ..... ..... 100 ..... 1010111 @r_vm +vwsmacc_vv 111101 . ..... ..... 000 ..... 1010111 @r_vm +vwsmacc_vx 111101 . ..... ..... 100 ..... 1010111 @r_vm +vwsmaccsu_vv 111110 . ..... ..... 000 ..... 1010111 @r_vm +vwsmaccsu_vx 111110 . ..... ..... 100 ..... 1010111 @r_vm +vwsmaccus_vx 111111 . ..... ..... 100 ..... 1010111 @r_vm +vssrl_vv 101010 . ..... ..... 000 ..... 1010111 @r_vm +vssrl_vx 101010 . ..... ..... 100 ..... 1010111 @r_vm +vssrl_vi 101010 . ..... ..... 011 ..... 1010111 @r_vm +vssra_vv 101011 . ..... ..... 000 ..... 1010111 @r_vm +vssra_vx 101011 . ..... ..... 100 ..... 1010111 @r_vm +vssra_vi 101011 . ..... ..... 011 ..... 1010111 @r_vm +vnclipu_vv 101110 . ..... ..... 000 ..... 1010111 @r_vm +vnclipu_vx 101110 . ..... ..... 100 ..... 1010111 @r_vm +vnclipu_vi 101110 . ..... ..... 011 ..... 1010111 @r_vm +vnclip_vv 101111 . ..... ..... 000 ..... 1010111 @r_vm +vnclip_vx 101111 . ..... ..... 100 ..... 1010111 @r_vm +vnclip_vi 101111 . ..... ..... 011 ..... 1010111 @r_vm +vfadd_vv 000000 . ..... ..... 001 ..... 
1010111 @r_vm +vfadd_vf 000000 . ..... ..... 101 ..... 1010111 @r_vm +vfsub_vv 000010 . ..... ..... 001 ..... 1010111 @r_vm +vfsub_vf 000010 . ..... ..... 101 ..... 1010111 @r_vm +vfrsub_vf 100111 . ..... ..... 101 ..... 1010111 @r_vm +vfwadd_vv 110000 . ..... ..... 001 ..... 1010111 @r_vm +vfwadd_vf 110000 . ..... ..... 101 ..... 1010111 @r_vm +vfwadd_wv 110100 . ..... ..... 001 ..... 1010111 @r_vm +vfwadd_wf 110100 . ..... ..... 101 ..... 1010111 @r_vm +vfwsub_vv 110010 . ..... ..... 001 ..... 1010111 @r_vm +vfwsub_vf 110010 . ..... ..... 101 ..... 1010111 @r_vm +vfwsub_wv 110110 . ..... ..... 001 ..... 1010111 @r_vm +vfwsub_wf 110110 . ..... ..... 101 ..... 1010111 @r_vm +vfmul_vv 100100 . ..... ..... 001 ..... 1010111 @r_vm +vfmul_vf 100100 . ..... ..... 101 ..... 1010111 @r_vm +vfdiv_vv 100000 . ..... ..... 001 ..... 1010111 @r_vm +vfdiv_vf 100000 . ..... ..... 101 ..... 1010111 @r_vm +vfrdiv_vf 100001 . ..... ..... 101 ..... 1010111 @r_vm +vfwmul_vv 111000 . ..... ..... 001 ..... 1010111 @r_vm +vfwmul_vf 111000 . ..... ..... 101 ..... 1010111 @r_vm +vfmacc_vv 101100 . ..... ..... 001 ..... 1010111 @r_vm +vfnmacc_vv 101101 . ..... ..... 001 ..... 1010111 @r_vm +vfnmacc_vf 101101 . ..... ..... 101 ..... 1010111 @r_vm +vfmacc_vf 101100 . ..... ..... 101 ..... 1010111 @r_vm +vfmsac_vv 101110 . ..... ..... 001 ..... 1010111 @r_vm +vfmsac_vf 101110 . ..... ..... 101 ..... 1010111 @r_vm +vfnmsac_vv 101111 . ..... ..... 001 ..... 1010111 @r_vm +vfnmsac_vf 101111 . ..... ..... 101 ..... 1010111 @r_vm +vfmadd_vv 101000 . ..... ..... 001 ..... 1010111 @r_vm +vfmadd_vf 101000 . ..... ..... 101 ..... 1010111 @r_vm +vfnmadd_vv 101001 . ..... ..... 001 ..... 1010111 @r_vm +vfnmadd_vf 101001 . ..... ..... 101 ..... 1010111 @r_vm +vfmsub_vv 101010 . ..... ..... 001 ..... 1010111 @r_vm +vfmsub_vf 101010 . ..... ..... 101 ..... 1010111 @r_vm +vfnmsub_vv 101011 . ..... ..... 001 ..... 1010111 @r_vm +vfnmsub_vf 101011 . ..... ..... 101 ..... 1010111 @r_vm +vfwmacc_vv 111100 . ..... ..... 001 ..... 1010111 @r_vm +vfwmacc_vf 111100 . ..... ..... 101 ..... 1010111 @r_vm +vfwnmacc_vv 111101 . ..... ..... 001 ..... 1010111 @r_vm +vfwnmacc_vf 111101 . ..... ..... 101 ..... 1010111 @r_vm +vfwmsac_vv 111110 . ..... ..... 001 ..... 1010111 @r_vm +vfwmsac_vf 111110 . ..... ..... 101 ..... 1010111 @r_vm +vfwnmsac_vv 111111 . ..... ..... 001 ..... 1010111 @r_vm +vfwnmsac_vf 111111 . ..... ..... 101 ..... 1010111 @r_vm +vfsqrt_v 100011 . ..... 00000 001 ..... 1010111 @r2_vm +vfmin_vv 000100 . ..... ..... 001 ..... 1010111 @r_vm +vfmin_vf 000100 . ..... ..... 101 ..... 1010111 @r_vm +vfmax_vv 000110 . ..... ..... 001 ..... 1010111 @r_vm +vfmax_vf 000110 . ..... ..... 101 ..... 1010111 @r_vm +vfsgnj_vv 001000 . ..... ..... 001 ..... 1010111 @r_vm +vfsgnj_vf 001000 . ..... ..... 101 ..... 1010111 @r_vm +vfsgnjn_vv 001001 . ..... ..... 001 ..... 1010111 @r_vm +vfsgnjn_vf 001001 . ..... ..... 101 ..... 1010111 @r_vm +vfsgnjx_vv 001010 . ..... ..... 001 ..... 1010111 @r_vm +vfsgnjx_vf 001010 . ..... ..... 101 ..... 1010111 @r_vm +vmfeq_vv 011000 . ..... ..... 001 ..... 1010111 @r_vm +vmfeq_vf 011000 . ..... ..... 101 ..... 1010111 @r_vm +vmfne_vv 011100 . ..... ..... 001 ..... 1010111 @r_vm +vmfne_vf 011100 . ..... ..... 101 ..... 1010111 @r_vm +vmflt_vv 011011 . ..... ..... 001 ..... 1010111 @r_vm +vmflt_vf 011011 . ..... ..... 101 ..... 1010111 @r_vm +vmfle_vv 011001 . ..... ..... 001 ..... 1010111 @r_vm +vmfle_vf 011001 . ..... ..... 101 ..... 1010111 @r_vm +vmfgt_vf 011101 . ..... ..... 101 ..... 
1010111 @r_vm +vmfge_vf 011111 . ..... ..... 101 ..... 1010111 @r_vm +vmford_vv 011010 . ..... ..... 001 ..... 1010111 @r_vm +vmford_vf 011010 . ..... ..... 101 ..... 1010111 @r_vm +vfclass_v 100011 . ..... 10000 001 ..... 1010111 @r2_vm +vfmerge_vfm 010111 0 ..... ..... 101 ..... 1010111 @r_vm_0 +vfmv_v_f 010111 1 00000 ..... 101 ..... 1010111 @r2 +vfcvt_xu_f_v 100010 . ..... 00000 001 ..... 1010111 @r2_vm +vfcvt_x_f_v 100010 . ..... 00001 001 ..... 1010111 @r2_vm +vfcvt_f_xu_v 100010 . ..... 00010 001 ..... 1010111 @r2_vm +vfcvt_f_x_v 100010 . ..... 00011 001 ..... 1010111 @r2_vm +vfwcvt_xu_f_v 100010 . ..... 01000 001 ..... 1010111 @r2_vm +vfwcvt_x_f_v 100010 . ..... 01001 001 ..... 1010111 @r2_vm +vfwcvt_f_xu_v 100010 . ..... 01010 001 ..... 1010111 @r2_vm +vfwcvt_f_x_v 100010 . ..... 01011 001 ..... 1010111 @r2_vm +vfwcvt_f_f_v 100010 . ..... 01100 001 ..... 1010111 @r2_vm +vfncvt_xu_f_v 100010 . ..... 10000 001 ..... 1010111 @r2_vm +vfncvt_x_f_v 100010 . ..... 10001 001 ..... 1010111 @r2_vm +vfncvt_f_xu_v 100010 . ..... 10010 001 ..... 1010111 @r2_vm +vfncvt_f_x_v 100010 . ..... 10011 001 ..... 1010111 @r2_vm +vfncvt_f_f_v 100010 . ..... 10100 001 ..... 1010111 @r2_vm +vredsum_vs 000000 . ..... ..... 010 ..... 1010111 @r_vm +vredand_vs 000001 . ..... ..... 010 ..... 1010111 @r_vm +vredor_vs 000010 . ..... ..... 010 ..... 1010111 @r_vm +vredxor_vs 000011 . ..... ..... 010 ..... 1010111 @r_vm +vredminu_vs 000100 . ..... ..... 010 ..... 1010111 @r_vm +vredmin_vs 000101 . ..... ..... 010 ..... 1010111 @r_vm +vredmaxu_vs 000110 . ..... ..... 010 ..... 1010111 @r_vm +vredmax_vs 000111 . ..... ..... 010 ..... 1010111 @r_vm +vwredsumu_vs 110000 . ..... ..... 000 ..... 1010111 @r_vm +vwredsum_vs 110001 . ..... ..... 000 ..... 1010111 @r_vm +# Vector ordered and unordered reduction sum +vfredsum_vs 0000-1 . ..... ..... 001 ..... 1010111 @r_vm +vfredmin_vs 000101 . ..... ..... 001 ..... 1010111 @r_vm +vfredmax_vs 000111 . ..... ..... 001 ..... 1010111 @r_vm +# Vector widening ordered and unordered float reduction sum +vfwredsum_vs 1100-1 . ..... ..... 001 ..... 1010111 @r_vm +vmand_mm 011001 - ..... ..... 010 ..... 1010111 @r +vmnand_mm 011101 - ..... ..... 010 ..... 1010111 @r +vmandnot_mm 011000 - ..... ..... 010 ..... 1010111 @r +vmxor_mm 011011 - ..... ..... 010 ..... 1010111 @r +vmor_mm 011010 - ..... ..... 010 ..... 1010111 @r +vmnor_mm 011110 - ..... ..... 010 ..... 1010111 @r +vmornot_mm 011100 - ..... ..... 010 ..... 1010111 @r +vmxnor_mm 011111 - ..... ..... 010 ..... 1010111 @r +vmpopc_m 010100 . ..... ----- 010 ..... 1010111 @r2_vm +vmfirst_m 010101 . ..... ----- 010 ..... 1010111 @r2_vm +vmsbf_m 010110 . ..... 00001 010 ..... 1010111 @r2_vm +vmsif_m 010110 . ..... 00011 010 ..... 1010111 @r2_vm +vmsof_m 010110 . ..... 00010 010 ..... 1010111 @r2_vm +viota_m 010110 . ..... 10000 010 ..... 1010111 @r2_vm +vid_v 010110 . 00000 10001 010 ..... 1010111 @r1_vm +vext_x_v 001100 1 ..... ..... 010 ..... 1010111 @r +vmv_s_x 001101 1 00000 ..... 110 ..... 1010111 @r2 +vfmv_f_s 001100 1 ..... 00000 001 ..... 1010111 @r2rd +vfmv_s_f 001101 1 00000 ..... 101 ..... 1010111 @r2 +vslideup_vx 001110 . ..... ..... 100 ..... 1010111 @r_vm +vslideup_vi 001110 . ..... ..... 011 ..... 1010111 @r_vm +vslide1up_vx 001110 . ..... ..... 110 ..... 1010111 @r_vm +vslidedown_vx 001111 . ..... ..... 100 ..... 1010111 @r_vm +vslidedown_vi 001111 . ..... ..... 011 ..... 1010111 @r_vm +vslide1down_vx 001111 . ..... ..... 110 ..... 1010111 @r_vm +vrgather_vv 001100 . ..... ..... 000 ..... 
1010111 @r_vm +vrgather_vx 001100 . ..... ..... 100 ..... 1010111 @r_vm +vrgather_vi 001100 . ..... ..... 011 ..... 1010111 @r_vm +vcompress_vm 010111 - ..... ..... 010 ..... 1010111 @r + +vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm +vsetvl 1000000 ..... ..... 111 ..... 1010111 @r diff --git a/target/riscv/insn_trans/trans_rvv.inc.c b/target/riscv/insn_trans/trans_rvv.inc.c new file mode 100644 index 0000000..dc333e6 --- /dev/null +++ b/target/riscv/insn_trans/trans_rvv.inc.c @@ -0,0 +1,2888 @@ +/* + * RISC-V translation routines for the RVV Standard Extension. + * + * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include "tcg/tcg-op-gvec.h" +#include "tcg/tcg-gvec-desc.h" +#include "internals.h" + +static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a) +{ + TCGv s1, s2, dst; + + if (!has_ext(ctx, RVV)) { + return false; + } + + s2 = tcg_temp_new(); + dst = tcg_temp_new(); + + /* Using x0 as the rs1 register specifier encodes an infinite AVL */ + if (a->rs1 == 0) { + /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */ + s1 = tcg_const_tl(RV_VLEN_MAX); + } else { + s1 = tcg_temp_new(); + gen_get_gpr(s1, a->rs1); + } + gen_get_gpr(s2, a->rs2); + gen_helper_vsetvl(dst, cpu_env, s1, s2); + gen_set_gpr(a->rd, dst); + tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); + lookup_and_goto_ptr(ctx); + ctx->base.is_jmp = DISAS_NORETURN; + + tcg_temp_free(s1); + tcg_temp_free(s2); + tcg_temp_free(dst); + return true; +} + +static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a) +{ + TCGv s1, s2, dst; + + if (!has_ext(ctx, RVV)) { + return false; + } + + s2 = tcg_const_tl(a->zimm); + dst = tcg_temp_new(); + + /* Using x0 as the rs1 register specifier encodes an infinite AVL */ + if (a->rs1 == 0) { + /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */ + s1 = tcg_const_tl(RV_VLEN_MAX); + } else { + s1 = tcg_temp_new(); + gen_get_gpr(s1, a->rs1); + } + gen_helper_vsetvl(dst, cpu_env, s1, s2); + gen_set_gpr(a->rd, dst); + gen_goto_tb(ctx, 0, ctx->pc_succ_insn); + ctx->base.is_jmp = DISAS_NORETURN; + + tcg_temp_free(s1); + tcg_temp_free(s2); + tcg_temp_free(dst); + return true; +} + +/* vector register offset from env */ +static uint32_t vreg_ofs(DisasContext *s, int reg) +{ + return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8; +} + +/* check functions */ + +/* + * cpu_get_tb_cpu_state() sets VILL when RVV is not present, + * so RVV is also checked in this function. + */ +static bool vext_check_isa_ill(DisasContext *s) +{ + return !s->vill; +} + +/* + * There are two rules checked here. + * + * 1. Vector register numbers are multiples of LMUL. (Section 3.2) + * + * 2. For all widening instructions, the destination LMUL value must also be + * a supported LMUL value.
(Section 11.2) + */ +static bool vext_check_reg(DisasContext *s, uint32_t reg, bool widen) +{ + /* + * The destination vector register group results are arranged as if both + * SEW and LMUL were at twice their current settings. (Section 11.2). + */ + int legal = widen ? 2 << s->lmul : 1 << s->lmul; + + return !((s->lmul == 0x3 && widen) || (reg % legal)); +} + +/* + * There are two rules checked here. + * + * 1. The destination vector register group for a masked vector instruction can + * only overlap the source mask register (v0) when LMUL=1. (Section 5.3) + * + * 2. In widening instructions and some other instructions, such as vslideup.vx, + * there is no need to check whether LMUL=1. + */ +static bool vext_check_overlap_mask(DisasContext *s, uint32_t vd, bool vm, + bool force) +{ + return (vm != 0 || vd != 0) || (!force && (s->lmul == 0)); +} + +/* The LMUL setting must be such that LMUL * NFIELDS <= 8. (Section 7.8) */ +static bool vext_check_nf(DisasContext *s, uint32_t nf) +{ + return (1 << s->lmul) * nf <= 8; +} + +/* + * The destination vector register group cannot overlap a source vector register + * group of a different element width. (Section 11.2) + */ +static inline bool vext_check_overlap_group(int rd, int dlen, int rs, int slen) +{ + return ((rd >= rs + slen) || (rs >= rd + dlen)); +} +/* common translation macro */ +#define GEN_VEXT_TRANS(NAME, SEQ, ARGTYPE, OP, CHECK) \ +static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE *a)\ +{ \ + if (CHECK(s, a)) { \ + return OP(s, a, SEQ); \ + } \ + return false; \ +} + +/* + *** unit stride load and store + */ +typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv, + TCGv_env, TCGv_i32); + +static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data, + gen_helper_ldst_us *fn, DisasContext *s) +{ + TCGv_ptr dest, mask; + TCGv base; + TCGv_i32 desc; + + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + dest = tcg_temp_new_ptr(); + mask = tcg_temp_new_ptr(); + base = tcg_temp_new(); + + /* + * simd_desc supports at most 256 bytes, while in this implementation + * the max vector group length is 2048 bytes, so the descriptor is split + * into two parts. + * + * The first part is vlen in bytes, encoded in maxsz of simd_desc. + * The second part is lmul, encoded in data of simd_desc.
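+ * + * For example (illustrative numbers): with VLEN = 2048 bits and LMUL = 8, + * the 2048-byte vector group exceeds what simd_desc can express on its own, + * so maxsz carries vlen / 8 = 256 bytes and the helper scales that back up + * using the lmul value recovered from data.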
+ */ + desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data)); + + gen_get_gpr(base, rs1); + tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); + tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); + + fn(dest, mask, base, cpu_env, desc); + + tcg_temp_free_ptr(dest); + tcg_temp_free_ptr(mask); + tcg_temp_free(base); + tcg_temp_free_i32(desc); + gen_set_label(over); + return true; +} + +static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq) +{ + uint32_t data = 0; + gen_helper_ldst_us *fn; + static gen_helper_ldst_us * const fns[2][7][4] = { + /* masked unit stride load */ + { { gen_helper_vlb_v_b_mask, gen_helper_vlb_v_h_mask, + gen_helper_vlb_v_w_mask, gen_helper_vlb_v_d_mask }, + { NULL, gen_helper_vlh_v_h_mask, + gen_helper_vlh_v_w_mask, gen_helper_vlh_v_d_mask }, + { NULL, NULL, + gen_helper_vlw_v_w_mask, gen_helper_vlw_v_d_mask }, + { gen_helper_vle_v_b_mask, gen_helper_vle_v_h_mask, + gen_helper_vle_v_w_mask, gen_helper_vle_v_d_mask }, + { gen_helper_vlbu_v_b_mask, gen_helper_vlbu_v_h_mask, + gen_helper_vlbu_v_w_mask, gen_helper_vlbu_v_d_mask }, + { NULL, gen_helper_vlhu_v_h_mask, + gen_helper_vlhu_v_w_mask, gen_helper_vlhu_v_d_mask }, + { NULL, NULL, + gen_helper_vlwu_v_w_mask, gen_helper_vlwu_v_d_mask } }, + /* unmasked unit stride load */ + { { gen_helper_vlb_v_b, gen_helper_vlb_v_h, + gen_helper_vlb_v_w, gen_helper_vlb_v_d }, + { NULL, gen_helper_vlh_v_h, + gen_helper_vlh_v_w, gen_helper_vlh_v_d }, + { NULL, NULL, + gen_helper_vlw_v_w, gen_helper_vlw_v_d }, + { gen_helper_vle_v_b, gen_helper_vle_v_h, + gen_helper_vle_v_w, gen_helper_vle_v_d }, + { gen_helper_vlbu_v_b, gen_helper_vlbu_v_h, + gen_helper_vlbu_v_w, gen_helper_vlbu_v_d }, + { NULL, gen_helper_vlhu_v_h, + gen_helper_vlhu_v_w, gen_helper_vlhu_v_d }, + { NULL, NULL, + gen_helper_vlwu_v_w, gen_helper_vlwu_v_d } } + }; + + fn = fns[a->vm][seq][s->sew]; + if (fn == NULL) { + return false; + } + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, NF, a->nf); + return ldst_us_trans(a->rd, a->rs1, data, fn, s); +} + +static bool ld_us_check(DisasContext *s, arg_r2nfvm* a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, false) && + vext_check_reg(s, a->rd, false) && + vext_check_nf(s, a->nf)); +} + +GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check) +GEN_VEXT_TRANS(vlh_v, 1, r2nfvm, ld_us_op, ld_us_check) +GEN_VEXT_TRANS(vlw_v, 2, r2nfvm, ld_us_op, ld_us_check) +GEN_VEXT_TRANS(vle_v, 3, r2nfvm, ld_us_op, ld_us_check) +GEN_VEXT_TRANS(vlbu_v, 4, r2nfvm, ld_us_op, ld_us_check) +GEN_VEXT_TRANS(vlhu_v, 5, r2nfvm, ld_us_op, ld_us_check) +GEN_VEXT_TRANS(vlwu_v, 6, r2nfvm, ld_us_op, ld_us_check) + +static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq) +{ + uint32_t data = 0; + gen_helper_ldst_us *fn; + static gen_helper_ldst_us * const fns[2][4][4] = { + /* masked unit stride load and store */ + { { gen_helper_vsb_v_b_mask, gen_helper_vsb_v_h_mask, + gen_helper_vsb_v_w_mask, gen_helper_vsb_v_d_mask }, + { NULL, gen_helper_vsh_v_h_mask, + gen_helper_vsh_v_w_mask, gen_helper_vsh_v_d_mask }, + { NULL, NULL, + gen_helper_vsw_v_w_mask, gen_helper_vsw_v_d_mask }, + { gen_helper_vse_v_b_mask, gen_helper_vse_v_h_mask, + gen_helper_vse_v_w_mask, gen_helper_vse_v_d_mask } }, + /* unmasked unit stride store */ + { { gen_helper_vsb_v_b, gen_helper_vsb_v_h, + gen_helper_vsb_v_w, gen_helper_vsb_v_d }, + { NULL, gen_helper_vsh_v_h, + gen_helper_vsh_v_w, gen_helper_vsh_v_d 
}, + { NULL, NULL, + gen_helper_vsw_v_w, gen_helper_vsw_v_d }, + { gen_helper_vse_v_b, gen_helper_vse_v_h, + gen_helper_vse_v_w, gen_helper_vse_v_d } } + }; + + fn = fns[a->vm][seq][s->sew]; + if (fn == NULL) { + return false; + } + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, NF, a->nf); + return ldst_us_trans(a->rd, a->rs1, data, fn, s); +} + +static bool st_us_check(DisasContext *s, arg_r2nfvm* a) +{ + return (vext_check_isa_ill(s) && + vext_check_reg(s, a->rd, false) && + vext_check_nf(s, a->nf)); +} + +GEN_VEXT_TRANS(vsb_v, 0, r2nfvm, st_us_op, st_us_check) +GEN_VEXT_TRANS(vsh_v, 1, r2nfvm, st_us_op, st_us_check) +GEN_VEXT_TRANS(vsw_v, 2, r2nfvm, st_us_op, st_us_check) +GEN_VEXT_TRANS(vse_v, 3, r2nfvm, st_us_op, st_us_check) + +/* + *** stride load and store + */ +typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv, + TCGv, TCGv_env, TCGv_i32); + +static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, + uint32_t data, gen_helper_ldst_stride *fn, + DisasContext *s) +{ + TCGv_ptr dest, mask; + TCGv base, stride; + TCGv_i32 desc; + + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + dest = tcg_temp_new_ptr(); + mask = tcg_temp_new_ptr(); + base = tcg_temp_new(); + stride = tcg_temp_new(); + desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data)); + + gen_get_gpr(base, rs1); + gen_get_gpr(stride, rs2); + tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); + tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); + + fn(dest, mask, base, stride, cpu_env, desc); + + tcg_temp_free_ptr(dest); + tcg_temp_free_ptr(mask); + tcg_temp_free(base); + tcg_temp_free(stride); + tcg_temp_free_i32(desc); + gen_set_label(over); + return true; +} + +static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq) +{ + uint32_t data = 0; + gen_helper_ldst_stride *fn; + static gen_helper_ldst_stride * const fns[7][4] = { + { gen_helper_vlsb_v_b, gen_helper_vlsb_v_h, + gen_helper_vlsb_v_w, gen_helper_vlsb_v_d }, + { NULL, gen_helper_vlsh_v_h, + gen_helper_vlsh_v_w, gen_helper_vlsh_v_d }, + { NULL, NULL, + gen_helper_vlsw_v_w, gen_helper_vlsw_v_d }, + { gen_helper_vlse_v_b, gen_helper_vlse_v_h, + gen_helper_vlse_v_w, gen_helper_vlse_v_d }, + { gen_helper_vlsbu_v_b, gen_helper_vlsbu_v_h, + gen_helper_vlsbu_v_w, gen_helper_vlsbu_v_d }, + { NULL, gen_helper_vlshu_v_h, + gen_helper_vlshu_v_w, gen_helper_vlshu_v_d }, + { NULL, NULL, + gen_helper_vlswu_v_w, gen_helper_vlswu_v_d }, + }; + + fn = fns[seq][s->sew]; + if (fn == NULL) { + return false; + } + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, NF, a->nf); + return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s); +} + +static bool ld_stride_check(DisasContext *s, arg_rnfvm* a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, false) && + vext_check_reg(s, a->rd, false) && + vext_check_nf(s, a->nf)); +} + +GEN_VEXT_TRANS(vlsb_v, 0, rnfvm, ld_stride_op, ld_stride_check) +GEN_VEXT_TRANS(vlsh_v, 1, rnfvm, ld_stride_op, ld_stride_check) +GEN_VEXT_TRANS(vlsw_v, 2, rnfvm, ld_stride_op, ld_stride_check) +GEN_VEXT_TRANS(vlse_v, 3, rnfvm, ld_stride_op, ld_stride_check) +GEN_VEXT_TRANS(vlsbu_v, 4, rnfvm, ld_stride_op, ld_stride_check) +GEN_VEXT_TRANS(vlshu_v, 5, rnfvm, ld_stride_op, ld_stride_check) +GEN_VEXT_TRANS(vlswu_v, 
6, rnfvm, ld_stride_op, ld_stride_check) + +static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq) +{ + uint32_t data = 0; + gen_helper_ldst_stride *fn; + static gen_helper_ldst_stride * const fns[4][4] = { + /* masked stride store */ + { gen_helper_vssb_v_b, gen_helper_vssb_v_h, + gen_helper_vssb_v_w, gen_helper_vssb_v_d }, + { NULL, gen_helper_vssh_v_h, + gen_helper_vssh_v_w, gen_helper_vssh_v_d }, + { NULL, NULL, + gen_helper_vssw_v_w, gen_helper_vssw_v_d }, + { gen_helper_vsse_v_b, gen_helper_vsse_v_h, + gen_helper_vsse_v_w, gen_helper_vsse_v_d } + }; + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, NF, a->nf); + fn = fns[seq][s->sew]; + if (fn == NULL) { + return false; + } + + return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s); +} + +static bool st_stride_check(DisasContext *s, arg_rnfvm* a) +{ + return (vext_check_isa_ill(s) && + vext_check_reg(s, a->rd, false) && + vext_check_nf(s, a->nf)); +} + +GEN_VEXT_TRANS(vssb_v, 0, rnfvm, st_stride_op, st_stride_check) +GEN_VEXT_TRANS(vssh_v, 1, rnfvm, st_stride_op, st_stride_check) +GEN_VEXT_TRANS(vssw_v, 2, rnfvm, st_stride_op, st_stride_check) +GEN_VEXT_TRANS(vsse_v, 3, rnfvm, st_stride_op, st_stride_check) + +/* + *** index load and store + */ +typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv, + TCGv_ptr, TCGv_env, TCGv_i32); + +static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, + uint32_t data, gen_helper_ldst_index *fn, + DisasContext *s) +{ + TCGv_ptr dest, mask, index; + TCGv base; + TCGv_i32 desc; + + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + dest = tcg_temp_new_ptr(); + mask = tcg_temp_new_ptr(); + index = tcg_temp_new_ptr(); + base = tcg_temp_new(); + desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data)); + + gen_get_gpr(base, rs1); + tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); + tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2)); + tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); + + fn(dest, mask, base, index, cpu_env, desc); + + tcg_temp_free_ptr(dest); + tcg_temp_free_ptr(mask); + tcg_temp_free_ptr(index); + tcg_temp_free(base); + tcg_temp_free_i32(desc); + gen_set_label(over); + return true; +} + +static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq) +{ + uint32_t data = 0; + gen_helper_ldst_index *fn; + static gen_helper_ldst_index * const fns[7][4] = { + { gen_helper_vlxb_v_b, gen_helper_vlxb_v_h, + gen_helper_vlxb_v_w, gen_helper_vlxb_v_d }, + { NULL, gen_helper_vlxh_v_h, + gen_helper_vlxh_v_w, gen_helper_vlxh_v_d }, + { NULL, NULL, + gen_helper_vlxw_v_w, gen_helper_vlxw_v_d }, + { gen_helper_vlxe_v_b, gen_helper_vlxe_v_h, + gen_helper_vlxe_v_w, gen_helper_vlxe_v_d }, + { gen_helper_vlxbu_v_b, gen_helper_vlxbu_v_h, + gen_helper_vlxbu_v_w, gen_helper_vlxbu_v_d }, + { NULL, gen_helper_vlxhu_v_h, + gen_helper_vlxhu_v_w, gen_helper_vlxhu_v_d }, + { NULL, NULL, + gen_helper_vlxwu_v_w, gen_helper_vlxwu_v_d }, + }; + + fn = fns[seq][s->sew]; + if (fn == NULL) { + return false; + } + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, NF, a->nf); + return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s); +} + +static bool ld_index_check(DisasContext *s, arg_rnfvm* a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, false) && + 
vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, false) && + vext_check_nf(s, a->nf)); +} + +GEN_VEXT_TRANS(vlxb_v, 0, rnfvm, ld_index_op, ld_index_check) +GEN_VEXT_TRANS(vlxh_v, 1, rnfvm, ld_index_op, ld_index_check) +GEN_VEXT_TRANS(vlxw_v, 2, rnfvm, ld_index_op, ld_index_check) +GEN_VEXT_TRANS(vlxe_v, 3, rnfvm, ld_index_op, ld_index_check) +GEN_VEXT_TRANS(vlxbu_v, 4, rnfvm, ld_index_op, ld_index_check) +GEN_VEXT_TRANS(vlxhu_v, 5, rnfvm, ld_index_op, ld_index_check) +GEN_VEXT_TRANS(vlxwu_v, 6, rnfvm, ld_index_op, ld_index_check) + +static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq) +{ + uint32_t data = 0; + gen_helper_ldst_index *fn; + static gen_helper_ldst_index * const fns[4][4] = { + { gen_helper_vsxb_v_b, gen_helper_vsxb_v_h, + gen_helper_vsxb_v_w, gen_helper_vsxb_v_d }, + { NULL, gen_helper_vsxh_v_h, + gen_helper_vsxh_v_w, gen_helper_vsxh_v_d }, + { NULL, NULL, + gen_helper_vsxw_v_w, gen_helper_vsxw_v_d }, + { gen_helper_vsxe_v_b, gen_helper_vsxe_v_h, + gen_helper_vsxe_v_w, gen_helper_vsxe_v_d } + }; + + fn = fns[seq][s->sew]; + if (fn == NULL) { + return false; + } + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, NF, a->nf); + return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s); +} + +static bool st_index_check(DisasContext *s, arg_rnfvm* a) +{ + return (vext_check_isa_ill(s) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, false) && + vext_check_nf(s, a->nf)); +} + +GEN_VEXT_TRANS(vsxb_v, 0, rnfvm, st_index_op, st_index_check) +GEN_VEXT_TRANS(vsxh_v, 1, rnfvm, st_index_op, st_index_check) +GEN_VEXT_TRANS(vsxw_v, 2, rnfvm, st_index_op, st_index_check) +GEN_VEXT_TRANS(vsxe_v, 3, rnfvm, st_index_op, st_index_check) + +/* + *** unit stride fault-only-first load + */ +static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data, + gen_helper_ldst_us *fn, DisasContext *s) +{ + TCGv_ptr dest, mask; + TCGv base; + TCGv_i32 desc; + + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + dest = tcg_temp_new_ptr(); + mask = tcg_temp_new_ptr(); + base = tcg_temp_new(); + desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data)); + + gen_get_gpr(base, rs1); + tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); + tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); + + fn(dest, mask, base, cpu_env, desc); + + tcg_temp_free_ptr(dest); + tcg_temp_free_ptr(mask); + tcg_temp_free(base); + tcg_temp_free_i32(desc); + gen_set_label(over); + return true; +} + +static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq) +{ + uint32_t data = 0; + gen_helper_ldst_us *fn; + static gen_helper_ldst_us * const fns[7][4] = { + { gen_helper_vlbff_v_b, gen_helper_vlbff_v_h, + gen_helper_vlbff_v_w, gen_helper_vlbff_v_d }, + { NULL, gen_helper_vlhff_v_h, + gen_helper_vlhff_v_w, gen_helper_vlhff_v_d }, + { NULL, NULL, + gen_helper_vlwff_v_w, gen_helper_vlwff_v_d }, + { gen_helper_vleff_v_b, gen_helper_vleff_v_h, + gen_helper_vleff_v_w, gen_helper_vleff_v_d }, + { gen_helper_vlbuff_v_b, gen_helper_vlbuff_v_h, + gen_helper_vlbuff_v_w, gen_helper_vlbuff_v_d }, + { NULL, gen_helper_vlhuff_v_h, + gen_helper_vlhuff_v_w, gen_helper_vlhuff_v_d }, + { NULL, NULL, + gen_helper_vlwuff_v_w, gen_helper_vlwuff_v_d } + }; + + fn = fns[seq][s->sew]; + if (fn == NULL) { + return false; + } + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, 
VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, NF, a->nf); + return ldff_trans(a->rd, a->rs1, data, fn, s); +} + +GEN_VEXT_TRANS(vlbff_v, 0, r2nfvm, ldff_op, ld_us_check) +GEN_VEXT_TRANS(vlhff_v, 1, r2nfvm, ldff_op, ld_us_check) +GEN_VEXT_TRANS(vlwff_v, 2, r2nfvm, ldff_op, ld_us_check) +GEN_VEXT_TRANS(vleff_v, 3, r2nfvm, ldff_op, ld_us_check) +GEN_VEXT_TRANS(vlbuff_v, 4, r2nfvm, ldff_op, ld_us_check) +GEN_VEXT_TRANS(vlhuff_v, 5, r2nfvm, ldff_op, ld_us_check) +GEN_VEXT_TRANS(vlwuff_v, 6, r2nfvm, ldff_op, ld_us_check) + +/* + *** vector atomic operation + */ +typedef void gen_helper_amo(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr, + TCGv_env, TCGv_i32); + +static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, + uint32_t data, gen_helper_amo *fn, DisasContext *s) +{ + TCGv_ptr dest, mask, index; + TCGv base; + TCGv_i32 desc; + + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + dest = tcg_temp_new_ptr(); + mask = tcg_temp_new_ptr(); + index = tcg_temp_new_ptr(); + base = tcg_temp_new(); + desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data)); + + gen_get_gpr(base, rs1); + tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); + tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2)); + tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); + + fn(dest, mask, base, index, cpu_env, desc); + + tcg_temp_free_ptr(dest); + tcg_temp_free_ptr(mask); + tcg_temp_free_ptr(index); + tcg_temp_free(base); + tcg_temp_free_i32(desc); + gen_set_label(over); + return true; +} + +static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq) +{ + uint32_t data = 0; + gen_helper_amo *fn; + static gen_helper_amo *const fnsw[9] = { + /* no atomic operation */ + gen_helper_vamoswapw_v_w, + gen_helper_vamoaddw_v_w, + gen_helper_vamoxorw_v_w, + gen_helper_vamoandw_v_w, + gen_helper_vamoorw_v_w, + gen_helper_vamominw_v_w, + gen_helper_vamomaxw_v_w, + gen_helper_vamominuw_v_w, + gen_helper_vamomaxuw_v_w + }; +#ifdef TARGET_RISCV64 + static gen_helper_amo *const fnsd[18] = { + gen_helper_vamoswapw_v_d, + gen_helper_vamoaddw_v_d, + gen_helper_vamoxorw_v_d, + gen_helper_vamoandw_v_d, + gen_helper_vamoorw_v_d, + gen_helper_vamominw_v_d, + gen_helper_vamomaxw_v_d, + gen_helper_vamominuw_v_d, + gen_helper_vamomaxuw_v_d, + gen_helper_vamoswapd_v_d, + gen_helper_vamoaddd_v_d, + gen_helper_vamoxord_v_d, + gen_helper_vamoandd_v_d, + gen_helper_vamoord_v_d, + gen_helper_vamomind_v_d, + gen_helper_vamomaxd_v_d, + gen_helper_vamominud_v_d, + gen_helper_vamomaxud_v_d + }; +#endif + + if (tb_cflags(s->base.tb) & CF_PARALLEL) { + gen_helper_exit_atomic(cpu_env); + s->base.is_jmp = DISAS_NORETURN; + return true; + } else { + if (s->sew == 3) { +#ifdef TARGET_RISCV64 + fn = fnsd[seq]; +#else + /* Check done in amo_check(). */ + g_assert_not_reached(); +#endif + } else { + fn = fnsw[seq]; + } + } + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + data = FIELD_DP32(data, VDATA, WD, a->wd); + return amo_trans(a->rd, a->rs1, a->rs2, data, fn, s); +} +/* + * There are two rules check here. + * + * 1. SEW must be at least as wide as the AMO memory element size. + * + * 2. If SEW is greater than XLEN, an illegal instruction exception is raised. 
+ */ +static bool amo_check(DisasContext *s, arg_rwdvm* a) +{ + return (!s->vill && has_ext(s, RVA) && + (!a->wd || vext_check_overlap_mask(s, a->rd, a->vm, false)) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, false) && + ((1 << s->sew) <= sizeof(target_ulong)) && + ((1 << s->sew) >= 4)); +} + +GEN_VEXT_TRANS(vamoswapw_v, 0, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamoaddw_v, 1, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamoxorw_v, 2, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamoandw_v, 3, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamoorw_v, 4, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamominw_v, 5, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamomaxw_v, 6, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamominuw_v, 7, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamomaxuw_v, 8, rwdvm, amo_op, amo_check) +#ifdef TARGET_RISCV64 +GEN_VEXT_TRANS(vamoswapd_v, 9, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamoaddd_v, 10, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamoxord_v, 11, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamoandd_v, 12, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamoord_v, 13, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamomind_v, 14, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamomaxd_v, 15, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamominud_v, 16, rwdvm, amo_op, amo_check) +GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check) +#endif + +/* + *** Vector Integer Arithmetic Instructions + */ +#define MAXSZ(s) (s->vlen >> (3 - s->lmul)) + +static bool opivv_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, false) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, false) && + vext_check_reg(s, a->rs1, false)); +} + +typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t, + uint32_t, uint32_t, uint32_t); + +static inline bool +do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn, + gen_helper_gvec_4_ptr *fn) +{ + TCGLabel *over = gen_new_label(); + if (!opivv_check(s, a)) { + return false; + } + + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + if (a->vm && s->vl_eq_vlmax) { + gvec_fn(s->sew, vreg_ofs(s, a->rd), + vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1), + MAXSZ(s), MAXSZ(s)); + } else { + uint32_t data = 0; + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), + vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), + cpu_env, 0, s->vlen / 8, data, fn); + } + gen_set_label(over); + return true; +} + +/* OPIVV with GVEC IR */ +#define GEN_OPIVV_GVEC_TRANS(NAME, SUF) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + static gen_helper_gvec_4_ptr * const fns[4] = { \ + gen_helper_##NAME##_b, gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, gen_helper_##NAME##_d, \ + }; \ + return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \ +} + +GEN_OPIVV_GVEC_TRANS(vadd_vv, add) +GEN_OPIVV_GVEC_TRANS(vsub_vv, sub) + +typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr, + TCGv_env, TCGv_i32); + +static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm, + gen_helper_opivx *fn, DisasContext *s) +{ + TCGv_ptr dest, src2, mask; + TCGv src1; + TCGv_i32 desc; + uint32_t data = 0; + + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + dest = tcg_temp_new_ptr(); + mask = tcg_temp_new_ptr(); + src2 = tcg_temp_new_ptr(); + src1 = tcg_temp_new(); + 
gen_get_gpr(src1, rs1);
+
+    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
+    data = FIELD_DP32(data, VDATA, VM, vm);
+    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
+    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
+
+    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
+    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
+    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
+
+    fn(dest, mask, src1, src2, cpu_env, desc);
+
+    tcg_temp_free_ptr(dest);
+    tcg_temp_free_ptr(mask);
+    tcg_temp_free_ptr(src2);
+    tcg_temp_free(src1);
+    tcg_temp_free_i32(desc);
+    gen_set_label(over);
+    return true;
+}
+
+static bool opivx_check(DisasContext *s, arg_rmrr *a)
+{
+    return (vext_check_isa_ill(s) &&
+            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
+            vext_check_reg(s, a->rd, false) &&
+            vext_check_reg(s, a->rs2, false));
+}
+
+typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
+                         uint32_t, uint32_t);
+
+static inline bool
+do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
+              gen_helper_opivx *fn)
+{
+    if (!opivx_check(s, a)) {
+        return false;
+    }
+
+    if (a->vm && s->vl_eq_vlmax) {
+        TCGv_i64 src1 = tcg_temp_new_i64();
+        TCGv tmp = tcg_temp_new();
+
+        gen_get_gpr(tmp, a->rs1);
+        tcg_gen_ext_tl_i64(src1, tmp);
+        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
+                src1, MAXSZ(s), MAXSZ(s));
+
+        tcg_temp_free_i64(src1);
+        tcg_temp_free(tmp);
+        return true;
+    }
+    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
+}
+
+/* OPIVX with GVEC IR */
+#define GEN_OPIVX_GVEC_TRANS(NAME, SUF)                                  \
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
+{                                                                        \
+    static gen_helper_opivx * const fns[4] = {                           \
+        gen_helper_##NAME##_b, gen_helper_##NAME##_h,                    \
+        gen_helper_##NAME##_w, gen_helper_##NAME##_d,                    \
+    };                                                                   \
+    return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);         \
+}
+
+GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
+GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)
+
+static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    tcg_gen_vec_sub8_i64(d, b, a);
+}
+
+static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    tcg_gen_vec_sub16_i64(d, b, a);
+}
+
+static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+    tcg_gen_sub_i32(ret, arg2, arg1);
+}
+
+static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+    tcg_gen_sub_i64(ret, arg2, arg1);
+}
+
+static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
+{
+    tcg_gen_sub_vec(vece, r, b, a);
+}
+
+static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
+                               TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
+{
+    static const GVecGen2s rsub_op[4] = {
+        { .fni8 = gen_vec_rsub8_i64,
+          .fniv = gen_rsub_vec,
+          .fno = gen_helper_vec_rsubs8,
+          .vece = MO_8 },
+        { .fni8 = gen_vec_rsub16_i64,
+          .fniv = gen_rsub_vec,
+          .fno = gen_helper_vec_rsubs16,
+          .vece = MO_16 },
+        { .fni4 = gen_rsub_i32,
+          .fniv = gen_rsub_vec,
+          .fno = gen_helper_vec_rsubs32,
+          .vece = MO_32 },
+        { .fni8 = gen_rsub_i64,
+          .fniv = gen_rsub_vec,
+          .fno = gen_helper_vec_rsubs64,
+          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+          .vece = MO_64 },
+    };
+
+    tcg_debug_assert(vece <= MO_64);
+    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
+}
+
+GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)
+
+static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
+                        gen_helper_opivx *fn, DisasContext *s, int zx)
+{
+    TCGv_ptr dest, src2, mask;
+    TCGv src1;
+    TCGv_i32 desc;
+    uint32_t data = 0;
+
+    TCGLabel *over = gen_new_label();
+    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+
+    dest =
tcg_temp_new_ptr(); + mask = tcg_temp_new_ptr(); + src2 = tcg_temp_new_ptr(); + if (zx) { + src1 = tcg_const_tl(imm); + } else { + src1 = tcg_const_tl(sextract64(imm, 0, 5)); + } + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data)); + + tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); + tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2)); + tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); + + fn(dest, mask, src1, src2, cpu_env, desc); + + tcg_temp_free_ptr(dest); + tcg_temp_free_ptr(mask); + tcg_temp_free_ptr(src2); + tcg_temp_free(src1); + tcg_temp_free_i32(desc); + gen_set_label(over); + return true; +} + +typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t, + uint32_t, uint32_t); + +static inline bool +do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn, + gen_helper_opivx *fn, int zx) +{ + if (!opivx_check(s, a)) { + return false; + } + + if (a->vm && s->vl_eq_vlmax) { + if (zx) { + gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), + extract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s)); + } else { + gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), + sextract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s)); + } + } else { + return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, zx); + } + return true; +} + +/* OPIVI with GVEC IR */ +#define GEN_OPIVI_GVEC_TRANS(NAME, ZX, OPIVX, SUF) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + static gen_helper_opivx * const fns[4] = { \ + gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \ + gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \ + }; \ + return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, \ + fns[s->sew], ZX); \ +} + +GEN_OPIVI_GVEC_TRANS(vadd_vi, 0, vadd_vx, addi) + +static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs, + int64_t c, uint32_t oprsz, uint32_t maxsz) +{ + TCGv_i64 tmp = tcg_const_i64(c); + tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz); + tcg_temp_free_i64(tmp); +} + +GEN_OPIVI_GVEC_TRANS(vrsub_vi, 0, vrsub_vx, rsubi) + +/* Vector Widening Integer Add/Subtract */ + +/* OPIVV with WIDEN */ +static bool opivv_widen_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, true) && + vext_check_reg(s, a->rd, true) && + vext_check_reg(s, a->rs2, false) && + vext_check_reg(s, a->rs1, false) && + vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2, + 1 << s->lmul) && + vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1, + 1 << s->lmul) && + (s->lmul < 0x3) && (s->sew < 0x3)); +} + +static bool do_opivv_widen(DisasContext *s, arg_rmrr *a, + gen_helper_gvec_4_ptr *fn, + bool (*checkfn)(DisasContext *, arg_rmrr *)) +{ + if (checkfn(s, a)) { + uint32_t data = 0; + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), + vreg_ofs(s, a->rs1), + vreg_ofs(s, a->rs2), + cpu_env, 0, s->vlen / 8, + data, fn); + gen_set_label(over); + return true; + } + return false; +} + +#define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + static gen_helper_gvec_4_ptr * const fns[3] = { \ + gen_helper_##NAME##_b, \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w \ + }; \ + return do_opivv_widen(s, a, 
fns[s->sew], CHECK); \ +} + +GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check) + +/* OPIVX with WIDEN */ +static bool opivx_widen_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, true) && + vext_check_reg(s, a->rd, true) && + vext_check_reg(s, a->rs2, false) && + vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2, + 1 << s->lmul) && + (s->lmul < 0x3) && (s->sew < 0x3)); +} + +static bool do_opivx_widen(DisasContext *s, arg_rmrr *a, + gen_helper_opivx *fn) +{ + if (opivx_widen_check(s, a)) { + return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s); + } + return true; +} + +#define GEN_OPIVX_WIDEN_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + static gen_helper_opivx * const fns[3] = { \ + gen_helper_##NAME##_b, \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w \ + }; \ + return do_opivx_widen(s, a, fns[s->sew]); \ +} + +GEN_OPIVX_WIDEN_TRANS(vwaddu_vx) +GEN_OPIVX_WIDEN_TRANS(vwadd_vx) +GEN_OPIVX_WIDEN_TRANS(vwsubu_vx) +GEN_OPIVX_WIDEN_TRANS(vwsub_vx) + +/* WIDEN OPIVV with WIDEN */ +static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, true) && + vext_check_reg(s, a->rd, true) && + vext_check_reg(s, a->rs2, true) && + vext_check_reg(s, a->rs1, false) && + vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1, + 1 << s->lmul) && + (s->lmul < 0x3) && (s->sew < 0x3)); +} + +static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a, + gen_helper_gvec_4_ptr *fn) +{ + if (opiwv_widen_check(s, a)) { + uint32_t data = 0; + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), + vreg_ofs(s, a->rs1), + vreg_ofs(s, a->rs2), + cpu_env, 0, s->vlen / 8, data, fn); + gen_set_label(over); + return true; + } + return false; +} + +#define GEN_OPIWV_WIDEN_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + static gen_helper_gvec_4_ptr * const fns[3] = { \ + gen_helper_##NAME##_b, \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w \ + }; \ + return do_opiwv_widen(s, a, fns[s->sew]); \ +} + +GEN_OPIWV_WIDEN_TRANS(vwaddu_wv) +GEN_OPIWV_WIDEN_TRANS(vwadd_wv) +GEN_OPIWV_WIDEN_TRANS(vwsubu_wv) +GEN_OPIWV_WIDEN_TRANS(vwsub_wv) + +/* WIDEN OPIVX with WIDEN */ +static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, true) && + vext_check_reg(s, a->rd, true) && + vext_check_reg(s, a->rs2, true) && + (s->lmul < 0x3) && (s->sew < 0x3)); +} + +static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a, + gen_helper_opivx *fn) +{ + if (opiwx_widen_check(s, a)) { + return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s); + } + return false; +} + +#define GEN_OPIWX_WIDEN_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + static gen_helper_opivx * const fns[3] = { \ + gen_helper_##NAME##_b, \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w \ + }; \ + return do_opiwx_widen(s, a, fns[s->sew]); \ +} + +GEN_OPIWX_WIDEN_TRANS(vwaddu_wx) +GEN_OPIWX_WIDEN_TRANS(vwadd_wx) +GEN_OPIWX_WIDEN_TRANS(vwsubu_wx) 
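The last of these widening translators follows below; to make the alignment and overlap arithmetic behind opivv_widen_check()/opivx_widen_check() concrete, here is a standalone sketch that mirrors it with a plain lmul value in place of the DisasContext (the harness and register numbers are illustrative only, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/* Widening destinations are aligned as if LMUL were doubled. */
static bool check_reg(int lmul, unsigned reg, bool widen)
{
    int legal = widen ? 2 << lmul : 1 << lmul;
    return !((lmul == 0x3 && widen) || (reg % legal));
}

/* Register groups [rd, rd+dlen) and [rs, rs+slen) must not overlap. */
static bool check_overlap_group(int rd, int dlen, int rs, int slen)
{
    return (rd >= rs + slen) || (rs >= rd + dlen);
}

int main(void)
{
    int lmul = 1;   /* LMUL = 2: sources use 2 registers, destination uses 4 */

    /* rd = 4, rs2 = 2: destination group v4..v7, source v2..v3 -> accepted */
    printf("%d %d\n", check_reg(lmul, 4, true),
           check_overlap_group(4, 2 << lmul, 2, 1 << lmul));
    /* rd = 4, rs2 = 4: destination v4..v7 overlaps source v4..v5 -> rejected */
    printf("%d\n", check_overlap_group(4, 2 << lmul, 4, 1 << lmul));
    return 0;
}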
+GEN_OPIWX_WIDEN_TRANS(vwsub_wx) + +/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */ +/* OPIVV without GVEC IR */ +#define GEN_OPIVV_TRANS(NAME, CHECK) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (CHECK(s, a)) { \ + uint32_t data = 0; \ + static gen_helper_gvec_4_ptr * const fns[4] = { \ + gen_helper_##NAME##_b, gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, gen_helper_##NAME##_d, \ + }; \ + TCGLabel *over = gen_new_label(); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ + vreg_ofs(s, a->rs1), \ + vreg_ofs(s, a->rs2), cpu_env, 0, \ + s->vlen / 8, data, fns[s->sew]); \ + gen_set_label(over); \ + return true; \ + } \ + return false; \ +} + +/* + * For vadc and vsbc, an illegal instruction exception is raised if the + * destination vector register is v0 and LMUL > 1. (Section 12.3) + */ +static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, false) && + vext_check_reg(s, a->rs1, false) && + ((a->rd != 0) || (s->lmul == 0))); +} + +GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check) +GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check) + +/* + * For vmadc and vmsbc, an illegal instruction exception is raised if the + * destination vector register overlaps a source vector register group. + */ +static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_reg(s, a->rs2, false) && + vext_check_reg(s, a->rs1, false) && + vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) && + vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)); +} + +GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check) +GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check) + +static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, false) && + ((a->rd != 0) || (s->lmul == 0))); +} + +/* OPIVX without GVEC IR */ +#define GEN_OPIVX_TRANS(NAME, CHECK) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (CHECK(s, a)) { \ + static gen_helper_opivx * const fns[4] = { \ + gen_helper_##NAME##_b, gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, gen_helper_##NAME##_d, \ + }; \ + \ + return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\ + } \ + return false; \ +} + +GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check) +GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check) + +static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_reg(s, a->rs2, false) && + vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)); +} + +GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check) +GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check) + +/* OPIVI without GVEC IR */ +#define GEN_OPIVI_TRANS(NAME, ZX, OPIVX, CHECK) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (CHECK(s, a)) { \ + static gen_helper_opivx * const fns[4] = { \ + gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \ + gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \ + }; \ + return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \ + fns[s->sew], s, ZX); \ + } \ + return false; \ +} + +GEN_OPIVI_TRANS(vadc_vim, 0, vadc_vxm, opivx_vadc_check) +GEN_OPIVI_TRANS(vmadc_vim, 0, vmadc_vxm, opivx_vmadc_check) + +/* Vector 
Bitwise Logical Instructions */ +GEN_OPIVV_GVEC_TRANS(vand_vv, and) +GEN_OPIVV_GVEC_TRANS(vor_vv, or) +GEN_OPIVV_GVEC_TRANS(vxor_vv, xor) +GEN_OPIVX_GVEC_TRANS(vand_vx, ands) +GEN_OPIVX_GVEC_TRANS(vor_vx, ors) +GEN_OPIVX_GVEC_TRANS(vxor_vx, xors) +GEN_OPIVI_GVEC_TRANS(vand_vi, 0, vand_vx, andi) +GEN_OPIVI_GVEC_TRANS(vor_vi, 0, vor_vx, ori) +GEN_OPIVI_GVEC_TRANS(vxor_vi, 0, vxor_vx, xori) + +/* Vector Single-Width Bit Shift Instructions */ +GEN_OPIVV_GVEC_TRANS(vsll_vv, shlv) +GEN_OPIVV_GVEC_TRANS(vsrl_vv, shrv) +GEN_OPIVV_GVEC_TRANS(vsra_vv, sarv) + +typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32, + uint32_t, uint32_t); + +static inline bool +do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn, + gen_helper_opivx *fn) +{ + if (!opivx_check(s, a)) { + return false; + } + + if (a->vm && s->vl_eq_vlmax) { + TCGv_i32 src1 = tcg_temp_new_i32(); + TCGv tmp = tcg_temp_new(); + + gen_get_gpr(tmp, a->rs1); + tcg_gen_trunc_tl_i32(src1, tmp); + tcg_gen_extract_i32(src1, src1, 0, s->sew + 3); + gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), + src1, MAXSZ(s), MAXSZ(s)); + + tcg_temp_free_i32(src1); + tcg_temp_free(tmp); + return true; + } + return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s); +} + +#define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + static gen_helper_opivx * const fns[4] = { \ + gen_helper_##NAME##_b, gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, gen_helper_##NAME##_d, \ + }; \ + \ + return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \ +} + +GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx, shls) +GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx, shrs) +GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx, sars) + +GEN_OPIVI_GVEC_TRANS(vsll_vi, 1, vsll_vx, shli) +GEN_OPIVI_GVEC_TRANS(vsrl_vi, 1, vsrl_vx, shri) +GEN_OPIVI_GVEC_TRANS(vsra_vi, 1, vsra_vx, sari) + +/* Vector Narrowing Integer Right Shift Instructions */ +static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, false) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, true) && + vext_check_reg(s, a->rs1, false) && + vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2, + 2 << s->lmul) && + (s->lmul < 0x3) && (s->sew < 0x3)); +} + +/* OPIVV with NARROW */ +#define GEN_OPIVV_NARROW_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (opivv_narrow_check(s, a)) { \ + uint32_t data = 0; \ + static gen_helper_gvec_4_ptr * const fns[3] = { \ + gen_helper_##NAME##_b, \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, \ + }; \ + TCGLabel *over = gen_new_label(); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ + vreg_ofs(s, a->rs1), \ + vreg_ofs(s, a->rs2), cpu_env, 0, \ + s->vlen / 8, data, fns[s->sew]); \ + gen_set_label(over); \ + return true; \ + } \ + return false; \ +} +GEN_OPIVV_NARROW_TRANS(vnsra_vv) +GEN_OPIVV_NARROW_TRANS(vnsrl_vv) + +static bool opivx_narrow_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, false) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, true) && + vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2, + 2 << s->lmul) && + (s->lmul < 0x3) && (s->sew < 0x3)); +} + +/* OPIVX with 
NARROW */ +#define GEN_OPIVX_NARROW_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (opivx_narrow_check(s, a)) { \ + static gen_helper_opivx * const fns[3] = { \ + gen_helper_##NAME##_b, \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, \ + }; \ + return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\ + } \ + return false; \ +} + +GEN_OPIVX_NARROW_TRANS(vnsra_vx) +GEN_OPIVX_NARROW_TRANS(vnsrl_vx) + +/* OPIVI with NARROW */ +#define GEN_OPIVI_NARROW_TRANS(NAME, ZX, OPIVX) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (opivx_narrow_check(s, a)) { \ + static gen_helper_opivx * const fns[3] = { \ + gen_helper_##OPIVX##_b, \ + gen_helper_##OPIVX##_h, \ + gen_helper_##OPIVX##_w, \ + }; \ + return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \ + fns[s->sew], s, ZX); \ + } \ + return false; \ +} + +GEN_OPIVI_NARROW_TRANS(vnsra_vi, 1, vnsra_vx) +GEN_OPIVI_NARROW_TRANS(vnsrl_vi, 1, vnsrl_vx) + +/* Vector Integer Comparison Instructions */ +/* + * For all comparison instructions, an illegal instruction exception is raised + * if the destination vector register overlaps a source vector register group + * and LMUL > 1. + */ +static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_reg(s, a->rs2, false) && + vext_check_reg(s, a->rs1, false) && + ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) && + vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) || + (s->lmul == 0))); +} +GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check) +GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check) +GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check) +GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check) +GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check) +GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check) + +static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_reg(s, a->rs2, false) && + (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) || + (s->lmul == 0))); +} + +GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check) +GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check) +GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check) +GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check) +GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check) +GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check) +GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check) +GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check) + +GEN_OPIVI_TRANS(vmseq_vi, 0, vmseq_vx, opivx_cmp_check) +GEN_OPIVI_TRANS(vmsne_vi, 0, vmsne_vx, opivx_cmp_check) +GEN_OPIVI_TRANS(vmsleu_vi, 1, vmsleu_vx, opivx_cmp_check) +GEN_OPIVI_TRANS(vmsle_vi, 0, vmsle_vx, opivx_cmp_check) +GEN_OPIVI_TRANS(vmsgtu_vi, 1, vmsgtu_vx, opivx_cmp_check) +GEN_OPIVI_TRANS(vmsgt_vi, 0, vmsgt_vx, opivx_cmp_check) + +/* Vector Integer Min/Max Instructions */ +GEN_OPIVV_GVEC_TRANS(vminu_vv, umin) +GEN_OPIVV_GVEC_TRANS(vmin_vv, smin) +GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax) +GEN_OPIVV_GVEC_TRANS(vmax_vv, smax) +GEN_OPIVX_TRANS(vminu_vx, opivx_check) +GEN_OPIVX_TRANS(vmin_vx, opivx_check) +GEN_OPIVX_TRANS(vmaxu_vx, opivx_check) +GEN_OPIVX_TRANS(vmax_vx, opivx_check) + +/* Vector Single-Width Integer Multiply Instructions */ +GEN_OPIVV_GVEC_TRANS(vmul_vv, mul) +GEN_OPIVV_TRANS(vmulh_vv, opivv_check) +GEN_OPIVV_TRANS(vmulhu_vv, opivv_check) +GEN_OPIVV_TRANS(vmulhsu_vv, opivv_check) +GEN_OPIVX_GVEC_TRANS(vmul_vx, muls) +GEN_OPIVX_TRANS(vmulh_vx, opivx_check) +GEN_OPIVX_TRANS(vmulhu_vx, opivx_check) +GEN_OPIVX_TRANS(vmulhsu_vx, opivx_check) + +/* Vector Integer Divide Instructions */ +GEN_OPIVV_TRANS(vdivu_vv, opivv_check) 
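Before the remaining divide translators below, one detail of the comparison checks above is worth spelling out: the result is a mask occupying a single register, so overlap with a source register group is tolerated only when LMUL = 1. A standalone sketch of that arithmetic (made-up register numbers, ISA and alignment checks omitted; not part of the patch):

#include <stdbool.h>
#include <stdio.h>

static bool check_overlap_group(int rd, int dlen, int rs, int slen)
{
    return (rd >= rs + slen) || (rs >= rd + dlen);
}

/* Mirrors the overlap term of opivx_cmp_check(): dest is one mask register. */
static bool cmp_overlap_ok(int lmul, int rd, int rs2)
{
    return check_overlap_group(rd, 1, rs2, 1 << lmul) || (lmul == 0);
}

int main(void)
{
    /* rd = 2, rs2 = 2 with LMUL = 4: v2 lies inside source group v2..v5 */
    printf("%d\n", cmp_overlap_ok(2, 2, 2));   /* 0: rejected */
    /* same registers with LMUL = 1: explicitly allowed */
    printf("%d\n", cmp_overlap_ok(0, 2, 2));   /* 1: accepted */
    return 0;
}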
+GEN_OPIVV_TRANS(vdiv_vv, opivv_check) +GEN_OPIVV_TRANS(vremu_vv, opivv_check) +GEN_OPIVV_TRANS(vrem_vv, opivv_check) +GEN_OPIVX_TRANS(vdivu_vx, opivx_check) +GEN_OPIVX_TRANS(vdiv_vx, opivx_check) +GEN_OPIVX_TRANS(vremu_vx, opivx_check) +GEN_OPIVX_TRANS(vrem_vx, opivx_check) + +/* Vector Widening Integer Multiply Instructions */ +GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmul_vx) +GEN_OPIVX_WIDEN_TRANS(vwmulu_vx) +GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx) + +/* Vector Single-Width Integer Multiply-Add Instructions */ +GEN_OPIVV_TRANS(vmacc_vv, opivv_check) +GEN_OPIVV_TRANS(vnmsac_vv, opivv_check) +GEN_OPIVV_TRANS(vmadd_vv, opivv_check) +GEN_OPIVV_TRANS(vnmsub_vv, opivv_check) +GEN_OPIVX_TRANS(vmacc_vx, opivx_check) +GEN_OPIVX_TRANS(vnmsac_vx, opivx_check) +GEN_OPIVX_TRANS(vmadd_vx, opivx_check) +GEN_OPIVX_TRANS(vnmsub_vx, opivx_check) + +/* Vector Widening Integer Multiply-Add Instructions */ +GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx) +GEN_OPIVX_WIDEN_TRANS(vwmacc_vx) +GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx) +GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx) + +/* Vector Integer Merge and Move Instructions */ +static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a) +{ + if (vext_check_isa_ill(s) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs1, false)) { + + if (s->vl_eq_vlmax) { + tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd), + vreg_ofs(s, a->rs1), + MAXSZ(s), MAXSZ(s)); + } else { + uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul); + static gen_helper_gvec_2_ptr * const fns[4] = { + gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h, + gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d, + }; + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), + cpu_env, 0, s->vlen / 8, data, fns[s->sew]); + gen_set_label(over); + } + return true; + } + return false; +} + +typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32); +static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a) +{ + if (vext_check_isa_ill(s) && + vext_check_reg(s, a->rd, false)) { + + TCGv s1; + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + s1 = tcg_temp_new(); + gen_get_gpr(s1, a->rs1); + + if (s->vl_eq_vlmax) { + tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd), + MAXSZ(s), MAXSZ(s), s1); + } else { + TCGv_i32 desc ; + TCGv_i64 s1_i64 = tcg_temp_new_i64(); + TCGv_ptr dest = tcg_temp_new_ptr(); + uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul); + static gen_helper_vmv_vx * const fns[4] = { + gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h, + gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d, + }; + + tcg_gen_ext_tl_i64(s1_i64, s1); + desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data)); + tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); + fns[s->sew](dest, s1_i64, cpu_env, desc); + + tcg_temp_free_ptr(dest); + tcg_temp_free_i32(desc); + tcg_temp_free_i64(s1_i64); + } + + tcg_temp_free(s1); + gen_set_label(over); + return true; + } + return false; +} + +static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a) +{ + if (vext_check_isa_ill(s) && + vext_check_reg(s, a->rd, false)) { + + int64_t simm = sextract64(a->rs1, 0, 5); + if (s->vl_eq_vlmax) { + tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd), + MAXSZ(s), 
MAXSZ(s), simm); + } else { + TCGv_i32 desc; + TCGv_i64 s1; + TCGv_ptr dest; + uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul); + static gen_helper_vmv_vx * const fns[4] = { + gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h, + gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d, + }; + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + s1 = tcg_const_i64(simm); + dest = tcg_temp_new_ptr(); + desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data)); + tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); + fns[s->sew](dest, s1, cpu_env, desc); + + tcg_temp_free_ptr(dest); + tcg_temp_free_i32(desc); + tcg_temp_free_i64(s1); + gen_set_label(over); + } + return true; + } + return false; +} + +GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check) +GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check) +GEN_OPIVI_TRANS(vmerge_vim, 0, vmerge_vxm, opivx_vadc_check) + +/* + *** Vector Fixed-Point Arithmetic Instructions + */ + +/* Vector Single-Width Saturating Add and Subtract */ +GEN_OPIVV_TRANS(vsaddu_vv, opivv_check) +GEN_OPIVV_TRANS(vsadd_vv, opivv_check) +GEN_OPIVV_TRANS(vssubu_vv, opivv_check) +GEN_OPIVV_TRANS(vssub_vv, opivv_check) +GEN_OPIVX_TRANS(vsaddu_vx, opivx_check) +GEN_OPIVX_TRANS(vsadd_vx, opivx_check) +GEN_OPIVX_TRANS(vssubu_vx, opivx_check) +GEN_OPIVX_TRANS(vssub_vx, opivx_check) +GEN_OPIVI_TRANS(vsaddu_vi, 1, vsaddu_vx, opivx_check) +GEN_OPIVI_TRANS(vsadd_vi, 0, vsadd_vx, opivx_check) + +/* Vector Single-Width Averaging Add and Subtract */ +GEN_OPIVV_TRANS(vaadd_vv, opivv_check) +GEN_OPIVV_TRANS(vasub_vv, opivv_check) +GEN_OPIVX_TRANS(vaadd_vx, opivx_check) +GEN_OPIVX_TRANS(vasub_vx, opivx_check) +GEN_OPIVI_TRANS(vaadd_vi, 0, vaadd_vx, opivx_check) + +/* Vector Single-Width Fractional Multiply with Rounding and Saturation */ +GEN_OPIVV_TRANS(vsmul_vv, opivv_check) +GEN_OPIVX_TRANS(vsmul_vx, opivx_check) + +/* Vector Widening Saturating Scaled Multiply-Add */ +GEN_OPIVV_WIDEN_TRANS(vwsmaccu_vv, opivv_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwsmacc_vv, opivv_widen_check) +GEN_OPIVV_WIDEN_TRANS(vwsmaccsu_vv, opivv_widen_check) +GEN_OPIVX_WIDEN_TRANS(vwsmaccu_vx) +GEN_OPIVX_WIDEN_TRANS(vwsmacc_vx) +GEN_OPIVX_WIDEN_TRANS(vwsmaccsu_vx) +GEN_OPIVX_WIDEN_TRANS(vwsmaccus_vx) + +/* Vector Single-Width Scaling Shift Instructions */ +GEN_OPIVV_TRANS(vssrl_vv, opivv_check) +GEN_OPIVV_TRANS(vssra_vv, opivv_check) +GEN_OPIVX_TRANS(vssrl_vx, opivx_check) +GEN_OPIVX_TRANS(vssra_vx, opivx_check) +GEN_OPIVI_TRANS(vssrl_vi, 1, vssrl_vx, opivx_check) +GEN_OPIVI_TRANS(vssra_vi, 0, vssra_vx, opivx_check) + +/* Vector Narrowing Fixed-Point Clip Instructions */ +GEN_OPIVV_NARROW_TRANS(vnclipu_vv) +GEN_OPIVV_NARROW_TRANS(vnclip_vv) +GEN_OPIVX_NARROW_TRANS(vnclipu_vx) +GEN_OPIVX_NARROW_TRANS(vnclip_vx) +GEN_OPIVI_NARROW_TRANS(vnclipu_vi, 1, vnclipu_vx) +GEN_OPIVI_NARROW_TRANS(vnclip_vi, 1, vnclip_vx) + +/* + *** Vector Float Point Arithmetic Instructions + */ +/* Vector Single-Width Floating-Point Add/Subtract Instructions */ + +/* + * If the current SEW does not correspond to a supported IEEE floating-point + * type, an illegal instruction exception is raised. 
+ */ +static bool opfvv_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, false) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, false) && + vext_check_reg(s, a->rs1, false) && + (s->sew != 0)); +} + +/* OPFVV without GVEC IR */ +#define GEN_OPFVV_TRANS(NAME, CHECK) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (CHECK(s, a)) { \ + uint32_t data = 0; \ + static gen_helper_gvec_4_ptr * const fns[3] = { \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, \ + gen_helper_##NAME##_d, \ + }; \ + TCGLabel *over = gen_new_label(); \ + gen_set_rm(s, 7); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ + vreg_ofs(s, a->rs1), \ + vreg_ofs(s, a->rs2), cpu_env, 0, \ + s->vlen / 8, data, fns[s->sew - 1]); \ + gen_set_label(over); \ + return true; \ + } \ + return false; \ +} +GEN_OPFVV_TRANS(vfadd_vv, opfvv_check) +GEN_OPFVV_TRANS(vfsub_vv, opfvv_check) + +typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr, + TCGv_env, TCGv_i32); + +static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, + uint32_t data, gen_helper_opfvf *fn, DisasContext *s) +{ + TCGv_ptr dest, src2, mask; + TCGv_i32 desc; + + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + dest = tcg_temp_new_ptr(); + mask = tcg_temp_new_ptr(); + src2 = tcg_temp_new_ptr(); + desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data)); + + tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); + tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2)); + tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); + + fn(dest, mask, cpu_fpr[rs1], src2, cpu_env, desc); + + tcg_temp_free_ptr(dest); + tcg_temp_free_ptr(mask); + tcg_temp_free_ptr(src2); + tcg_temp_free_i32(desc); + gen_set_label(over); + return true; +} + +static bool opfvf_check(DisasContext *s, arg_rmrr *a) +{ +/* + * If the current SEW does not correspond to a supported IEEE floating-point + * type, an illegal instruction exception is raised + */ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, false) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, false) && + (s->sew != 0)); +} + +/* OPFVF without GVEC IR */ +#define GEN_OPFVF_TRANS(NAME, CHECK) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (CHECK(s, a)) { \ + uint32_t data = 0; \ + static gen_helper_opfvf *const fns[3] = { \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, \ + gen_helper_##NAME##_d, \ + }; \ + gen_set_rm(s, 7); \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + return opfvf_trans(a->rd, a->rs1, a->rs2, data, \ + fns[s->sew - 1], s); \ + } \ + return false; \ +} + +GEN_OPFVF_TRANS(vfadd_vf, opfvf_check) +GEN_OPFVF_TRANS(vfsub_vf, opfvf_check) +GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check) + +/* Vector Widening Floating-Point Add/Subtract Instructions */ +static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, true) && + vext_check_reg(s, a->rd, true) && + vext_check_reg(s, a->rs2, false) && + vext_check_reg(s, a->rs1, false) && + vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2, + 1 << s->lmul) && + 
vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1, + 1 << s->lmul) && + (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0)); +} + +/* OPFVV with WIDEN */ +#define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (CHECK(s, a)) { \ + uint32_t data = 0; \ + static gen_helper_gvec_4_ptr * const fns[2] = { \ + gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ + }; \ + TCGLabel *over = gen_new_label(); \ + gen_set_rm(s, 7); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ + vreg_ofs(s, a->rs1), \ + vreg_ofs(s, a->rs2), cpu_env, 0, \ + s->vlen / 8, data, fns[s->sew - 1]); \ + gen_set_label(over); \ + return true; \ + } \ + return false; \ +} + +GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check) + +static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, true) && + vext_check_reg(s, a->rd, true) && + vext_check_reg(s, a->rs2, false) && + vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2, + 1 << s->lmul) && + (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0)); +} + +/* OPFVF with WIDEN */ +#define GEN_OPFVF_WIDEN_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (opfvf_widen_check(s, a)) { \ + uint32_t data = 0; \ + static gen_helper_opfvf *const fns[2] = { \ + gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ + }; \ + gen_set_rm(s, 7); \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + return opfvf_trans(a->rd, a->rs1, a->rs2, data, \ + fns[s->sew - 1], s); \ + } \ + return false; \ +} + +GEN_OPFVF_WIDEN_TRANS(vfwadd_vf) +GEN_OPFVF_WIDEN_TRANS(vfwsub_vf) + +static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, true) && + vext_check_reg(s, a->rd, true) && + vext_check_reg(s, a->rs2, true) && + vext_check_reg(s, a->rs1, false) && + vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1, + 1 << s->lmul) && + (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0)); +} + +/* WIDEN OPFVV with WIDEN */ +#define GEN_OPFWV_WIDEN_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (opfwv_widen_check(s, a)) { \ + uint32_t data = 0; \ + static gen_helper_gvec_4_ptr * const fns[2] = { \ + gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ + }; \ + TCGLabel *over = gen_new_label(); \ + gen_set_rm(s, 7); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ + vreg_ofs(s, a->rs1), \ + vreg_ofs(s, a->rs2), cpu_env, 0, \ + s->vlen / 8, data, fns[s->sew - 1]); \ + gen_set_label(over); \ + return true; \ + } \ + return false; \ +} + +GEN_OPFWV_WIDEN_TRANS(vfwadd_wv) +GEN_OPFWV_WIDEN_TRANS(vfwsub_wv) + +static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, true) && + vext_check_reg(s, a->rd, true) && + vext_check_reg(s, a->rs2, true) && + (s->lmul < 0x3) && (s->sew < 0x3) && 
(s->sew != 0)); +} + +/* WIDEN OPFVF with WIDEN */ +#define GEN_OPFWF_WIDEN_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (opfwf_widen_check(s, a)) { \ + uint32_t data = 0; \ + static gen_helper_opfvf *const fns[2] = { \ + gen_helper_##NAME##_h, gen_helper_##NAME##_w, \ + }; \ + gen_set_rm(s, 7); \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + return opfvf_trans(a->rd, a->rs1, a->rs2, data, \ + fns[s->sew - 1], s); \ + } \ + return false; \ +} + +GEN_OPFWF_WIDEN_TRANS(vfwadd_wf) +GEN_OPFWF_WIDEN_TRANS(vfwsub_wf) + +/* Vector Single-Width Floating-Point Multiply/Divide Instructions */ +GEN_OPFVV_TRANS(vfmul_vv, opfvv_check) +GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check) +GEN_OPFVF_TRANS(vfmul_vf, opfvf_check) +GEN_OPFVF_TRANS(vfdiv_vf, opfvf_check) +GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check) + +/* Vector Widening Floating-Point Multiply */ +GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwmul_vf) + +/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */ +GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check) +GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check) +GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check) +GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check) +GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check) +GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check) +GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check) +GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check) +GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check) +GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check) +GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check) +GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check) +GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check) +GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check) +GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check) +GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check) + +/* Vector Widening Floating-Point Fused Multiply-Add Instructions */ +GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check) +GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check) +GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf) +GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf) +GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf) +GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf) + +/* Vector Floating-Point Square-Root Instruction */ + +/* + * If the current SEW does not correspond to a supported IEEE floating-point + * type, an illegal instruction exception is raised + */ +static bool opfv_check(DisasContext *s, arg_rmr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, false) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, false) && + (s->sew != 0)); +} + +#define GEN_OPFV_TRANS(NAME, CHECK) \ +static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ +{ \ + if (CHECK(s, a)) { \ + uint32_t data = 0; \ + static gen_helper_gvec_3_ptr * const fns[3] = { \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, \ + gen_helper_##NAME##_d, \ + }; \ + TCGLabel *over = gen_new_label(); \ + gen_set_rm(s, 7); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ + vreg_ofs(s, a->rs2), cpu_env, 0, \ + s->vlen / 8, data, fns[s->sew - 1]); \ + gen_set_label(over); \ + return true; \ + } \ + return false; \ +} + +GEN_OPFV_TRANS(vfsqrt_v, opfv_check) + +/* Vector Floating-Point MIN/MAX Instructions */ 
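The MIN/MAX, sign-injection and remaining floating-point translators below reuse the same opfvv/opfvf checks, and like the ones above they index their helper tables with fns[s->sew - 1]: the two-bit SEW field selects 8/16/32/64-bit elements, and there is no 8-bit IEEE type, hence the (s->sew != 0) term in the checks. A small standalone illustration of that mapping (not part of the patch):

#include <stdio.h>

int main(void)
{
    static const char *const fp_suffix[3] = { "_h", "_w", "_d" };

    for (int sew = 0; sew < 4; sew++) {
        int bits = 8 << sew;            /* element width selected by vsew */
        if (sew == 0) {
            printf("SEW=%d: no IEEE type, illegal instruction\n", bits);
        } else {
            printf("SEW=%d: helper suffix %s\n", bits, fp_suffix[sew - 1]);
        }
    }
    return 0;
}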
+GEN_OPFVV_TRANS(vfmin_vv, opfvv_check) +GEN_OPFVV_TRANS(vfmax_vv, opfvv_check) +GEN_OPFVF_TRANS(vfmin_vf, opfvf_check) +GEN_OPFVF_TRANS(vfmax_vf, opfvf_check) + +/* Vector Floating-Point Sign-Injection Instructions */ +GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check) +GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check) +GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check) +GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check) +GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check) +GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check) + +/* Vector Floating-Point Compare Instructions */ +static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_reg(s, a->rs2, false) && + vext_check_reg(s, a->rs1, false) && + (s->sew != 0) && + ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) && + vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) || + (s->lmul == 0))); +} + +GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check) +GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check) +GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check) +GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check) +GEN_OPFVV_TRANS(vmford_vv, opfvv_cmp_check) + +static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_reg(s, a->rs2, false) && + (s->sew != 0) && + (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) || + (s->lmul == 0))); +} + +GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check) +GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check) +GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check) +GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check) +GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check) +GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check) +GEN_OPFVF_TRANS(vmford_vf, opfvf_cmp_check) + +/* Vector Floating-Point Classify Instruction */ +GEN_OPFV_TRANS(vfclass_v, opfv_check) + +/* Vector Floating-Point Merge Instruction */ +GEN_OPFVF_TRANS(vfmerge_vfm, opfvf_check) + +static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a) +{ + if (vext_check_isa_ill(s) && + vext_check_reg(s, a->rd, false) && + (s->sew != 0)) { + + if (s->vl_eq_vlmax) { + tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), + MAXSZ(s), MAXSZ(s), cpu_fpr[a->rs1]); + } else { + TCGv_ptr dest; + TCGv_i32 desc; + uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul); + static gen_helper_vmv_vx * const fns[3] = { + gen_helper_vmv_v_x_h, + gen_helper_vmv_v_x_w, + gen_helper_vmv_v_x_d, + }; + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + dest = tcg_temp_new_ptr(); + desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data)); + tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); + fns[s->sew - 1](dest, cpu_fpr[a->rs1], cpu_env, desc); + + tcg_temp_free_ptr(dest); + tcg_temp_free_i32(desc); + gen_set_label(over); + } + return true; + } + return false; +} + +/* Single-Width Floating-Point/Integer Type-Convert Instructions */ +GEN_OPFV_TRANS(vfcvt_xu_f_v, opfv_check) +GEN_OPFV_TRANS(vfcvt_x_f_v, opfv_check) +GEN_OPFV_TRANS(vfcvt_f_xu_v, opfv_check) +GEN_OPFV_TRANS(vfcvt_f_x_v, opfv_check) + +/* Widening Floating-Point/Integer Type-Convert Instructions */ + +/* + * If the current SEW does not correspond to a supported IEEE floating-point + * type, an illegal instruction exception is raised + */ +static bool opfv_widen_check(DisasContext *s, arg_rmr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, true) && + vext_check_reg(s, a->rd, true) && + vext_check_reg(s, a->rs2, false) && + vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2, + 1 << s->lmul) && + (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0)); +} + +#define 
GEN_OPFV_WIDEN_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ +{ \ + if (opfv_widen_check(s, a)) { \ + uint32_t data = 0; \ + static gen_helper_gvec_3_ptr * const fns[2] = { \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, \ + }; \ + TCGLabel *over = gen_new_label(); \ + gen_set_rm(s, 7); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ + vreg_ofs(s, a->rs2), cpu_env, 0, \ + s->vlen / 8, data, fns[s->sew - 1]); \ + gen_set_label(over); \ + return true; \ + } \ + return false; \ +} + +GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v) +GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v) +GEN_OPFV_WIDEN_TRANS(vfwcvt_f_xu_v) +GEN_OPFV_WIDEN_TRANS(vfwcvt_f_x_v) +GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v) + +/* Narrowing Floating-Point/Integer Type-Convert Instructions */ + +/* + * If the current SEW does not correspond to a supported IEEE floating-point + * type, an illegal instruction exception is raised + */ +static bool opfv_narrow_check(DisasContext *s, arg_rmr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, false) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, true) && + vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2, + 2 << s->lmul) && + (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0)); +} + +#define GEN_OPFV_NARROW_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ +{ \ + if (opfv_narrow_check(s, a)) { \ + uint32_t data = 0; \ + static gen_helper_gvec_3_ptr * const fns[2] = { \ + gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, \ + }; \ + TCGLabel *over = gen_new_label(); \ + gen_set_rm(s, 7); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ + vreg_ofs(s, a->rs2), cpu_env, 0, \ + s->vlen / 8, data, fns[s->sew - 1]); \ + gen_set_label(over); \ + return true; \ + } \ + return false; \ +} + +GEN_OPFV_NARROW_TRANS(vfncvt_xu_f_v) +GEN_OPFV_NARROW_TRANS(vfncvt_x_f_v) +GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_v) +GEN_OPFV_NARROW_TRANS(vfncvt_f_x_v) +GEN_OPFV_NARROW_TRANS(vfncvt_f_f_v) + +/* + *** Vector Reduction Operations + */ +/* Vector Single-Width Integer Reduction Instructions */ +static bool reduction_check(DisasContext *s, arg_rmrr *a) +{ + return vext_check_isa_ill(s) && vext_check_reg(s, a->rs2, false); +} + +GEN_OPIVV_TRANS(vredsum_vs, reduction_check) +GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check) +GEN_OPIVV_TRANS(vredmax_vs, reduction_check) +GEN_OPIVV_TRANS(vredminu_vs, reduction_check) +GEN_OPIVV_TRANS(vredmin_vs, reduction_check) +GEN_OPIVV_TRANS(vredand_vs, reduction_check) +GEN_OPIVV_TRANS(vredor_vs, reduction_check) +GEN_OPIVV_TRANS(vredxor_vs, reduction_check) + +/* Vector Widening Integer Reduction Instructions */ +GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_check) +GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_check) + +/* Vector Single-Width Floating-Point Reduction Instructions */ +GEN_OPFVV_TRANS(vfredsum_vs, reduction_check) +GEN_OPFVV_TRANS(vfredmax_vs, reduction_check) +GEN_OPFVV_TRANS(vfredmin_vs, reduction_check) + +/* Vector Widening Floating-Point Reduction Instructions */ +GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check) + +/* + *** Vector Mask Operations + 
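+ *
+ * Each mask element occupies mlen = SEW/LMUL bits of its operand register
+ * (translate.c derives mlen as 1 << (sew + 3 - lmul)); the helpers in
+ * vector_helper.c read only bit 0 of each mlen-bit field via
+ * vext_elem_mask() and write the whole field via vext_set_elem_mask().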
*/ + +/* Vector Mask-Register Logical Instructions */ +#define GEN_MM_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_r *a) \ +{ \ + if (vext_check_isa_ill(s)) { \ + uint32_t data = 0; \ + gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \ + TCGLabel *over = gen_new_label(); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ + vreg_ofs(s, a->rs1), \ + vreg_ofs(s, a->rs2), cpu_env, 0, \ + s->vlen / 8, data, fn); \ + gen_set_label(over); \ + return true; \ + } \ + return false; \ +} + +GEN_MM_TRANS(vmand_mm) +GEN_MM_TRANS(vmnand_mm) +GEN_MM_TRANS(vmandnot_mm) +GEN_MM_TRANS(vmxor_mm) +GEN_MM_TRANS(vmor_mm) +GEN_MM_TRANS(vmnor_mm) +GEN_MM_TRANS(vmornot_mm) +GEN_MM_TRANS(vmxnor_mm) + +/* Vector mask population count vmpopc */ +static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a) +{ + if (vext_check_isa_ill(s)) { + TCGv_ptr src2, mask; + TCGv dst; + TCGv_i32 desc; + uint32_t data = 0; + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + + mask = tcg_temp_new_ptr(); + src2 = tcg_temp_new_ptr(); + dst = tcg_temp_new(); + desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data)); + + tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2)); + tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); + + gen_helper_vmpopc_m(dst, mask, src2, cpu_env, desc); + gen_set_gpr(a->rd, dst); + + tcg_temp_free_ptr(mask); + tcg_temp_free_ptr(src2); + tcg_temp_free(dst); + tcg_temp_free_i32(desc); + return true; + } + return false; +} + +/* vmfirst find-first-set mask bit */ +static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a) +{ + if (vext_check_isa_ill(s)) { + TCGv_ptr src2, mask; + TCGv dst; + TCGv_i32 desc; + uint32_t data = 0; + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + + mask = tcg_temp_new_ptr(); + src2 = tcg_temp_new_ptr(); + dst = tcg_temp_new(); + desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data)); + + tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2)); + tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); + + gen_helper_vmfirst_m(dst, mask, src2, cpu_env, desc); + gen_set_gpr(a->rd, dst); + + tcg_temp_free_ptr(mask); + tcg_temp_free_ptr(src2); + tcg_temp_free(dst); + tcg_temp_free_i32(desc); + return true; + } + return false; +} + +/* vmsbf.m set-before-first mask bit */ +/* vmsif.m set-includ-first mask bit */ +/* vmsof.m set-only-first mask bit */ +#define GEN_M_TRANS(NAME) \ +static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ +{ \ + if (vext_check_isa_ill(s)) { \ + uint32_t data = 0; \ + gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \ + TCGLabel *over = gen_new_label(); \ + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \ + \ + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \ + vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \ + cpu_env, 0, s->vlen / 8, data, fn); \ + gen_set_label(over); \ + return true; \ + } \ + return false; \ +} + +GEN_M_TRANS(vmsbf_m) +GEN_M_TRANS(vmsif_m) +GEN_M_TRANS(vmsof_m) + +/* Vector Iota Instruction */ +static bool trans_viota_m(DisasContext *s, arg_viota_m *a) +{ + if (vext_check_isa_ill(s) && + vext_check_reg(s, a->rd, false) && + vext_check_overlap_group(a->rd, 1 << 
s->lmul, a->rs2, 1) && + (a->vm != 0 || a->rd != 0)) { + uint32_t data = 0; + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + static gen_helper_gvec_3_ptr * const fns[4] = { + gen_helper_viota_m_b, gen_helper_viota_m_h, + gen_helper_viota_m_w, gen_helper_viota_m_d, + }; + tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), + vreg_ofs(s, a->rs2), cpu_env, 0, + s->vlen / 8, data, fns[s->sew]); + gen_set_label(over); + return true; + } + return false; +} + +/* Vector Element Index Instruction */ +static bool trans_vid_v(DisasContext *s, arg_vid_v *a) +{ + if (vext_check_isa_ill(s) && + vext_check_reg(s, a->rd, false) && + vext_check_overlap_mask(s, a->rd, a->vm, false)) { + uint32_t data = 0; + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, VM, a->vm); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + static gen_helper_gvec_2_ptr * const fns[4] = { + gen_helper_vid_v_b, gen_helper_vid_v_h, + gen_helper_vid_v_w, gen_helper_vid_v_d, + }; + tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), + cpu_env, 0, s->vlen / 8, data, fns[s->sew]); + gen_set_label(over); + return true; + } + return false; +} + +/* + *** Vector Permutation Instructions + */ + +/* Integer Extract Instruction */ + +static void load_element(TCGv_i64 dest, TCGv_ptr base, + int ofs, int sew) +{ + switch (sew) { + case MO_8: + tcg_gen_ld8u_i64(dest, base, ofs); + break; + case MO_16: + tcg_gen_ld16u_i64(dest, base, ofs); + break; + case MO_32: + tcg_gen_ld32u_i64(dest, base, ofs); + break; + case MO_64: + tcg_gen_ld_i64(dest, base, ofs); + break; + default: + g_assert_not_reached(); + break; + } +} + +/* offset of the idx element with base regsiter r */ +static uint32_t endian_ofs(DisasContext *s, int r, int idx) +{ +#ifdef HOST_WORDS_BIGENDIAN + return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew); +#else + return vreg_ofs(s, r) + (idx << s->sew); +#endif +} + +/* adjust the index according to the endian */ +static void endian_adjust(TCGv_i32 ofs, int sew) +{ +#ifdef HOST_WORDS_BIGENDIAN + tcg_gen_xori_i32(ofs, ofs, 7 >> sew); +#endif +} + +/* Load idx >= VLMAX ? 0 : vreg[idx] */ +static void vec_element_loadx(DisasContext *s, TCGv_i64 dest, + int vreg, TCGv idx, int vlmax) +{ + TCGv_i32 ofs = tcg_temp_new_i32(); + TCGv_ptr base = tcg_temp_new_ptr(); + TCGv_i64 t_idx = tcg_temp_new_i64(); + TCGv_i64 t_vlmax, t_zero; + + /* + * Mask the index to the length so that we do + * not produce an out-of-range load. + */ + tcg_gen_trunc_tl_i32(ofs, idx); + tcg_gen_andi_i32(ofs, ofs, vlmax - 1); + + /* Convert the index to an offset. */ + endian_adjust(ofs, s->sew); + tcg_gen_shli_i32(ofs, ofs, s->sew); + + /* Convert the index to a pointer. */ + tcg_gen_ext_i32_ptr(base, ofs); + tcg_gen_add_ptr(base, base, cpu_env); + + /* Perform the load. */ + load_element(dest, base, + vreg_ofs(s, vreg), s->sew); + tcg_temp_free_ptr(base); + tcg_temp_free_i32(ofs); + + /* Flush out-of-range indexing to zero. 
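+ * The element was loaded from vreg[idx & (vlmax - 1)] above; here the
+ * unmasked index is compared against VLMAX and the result is forced to
+ * zero when idx >= VLMAX.  E.g. for VLMAX = 16 and idx = 20 the load
+ * read vreg[4], but the movcond below replaces dest with 0.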
*/ + t_vlmax = tcg_const_i64(vlmax); + t_zero = tcg_const_i64(0); + tcg_gen_extu_tl_i64(t_idx, idx); + + tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx, + t_vlmax, dest, t_zero); + + tcg_temp_free_i64(t_vlmax); + tcg_temp_free_i64(t_zero); + tcg_temp_free_i64(t_idx); +} + +static void vec_element_loadi(DisasContext *s, TCGv_i64 dest, + int vreg, int idx) +{ + load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew); +} + +static bool trans_vext_x_v(DisasContext *s, arg_r *a) +{ + TCGv_i64 tmp = tcg_temp_new_i64(); + TCGv dest = tcg_temp_new(); + + if (a->rs1 == 0) { + /* Special case vmv.x.s rd, vs2. */ + vec_element_loadi(s, tmp, a->rs2, 0); + } else { + /* This instruction ignores LMUL and vector register groups */ + int vlmax = s->vlen >> (3 + s->sew); + vec_element_loadx(s, tmp, a->rs2, cpu_gpr[a->rs1], vlmax); + } + tcg_gen_trunc_i64_tl(dest, tmp); + gen_set_gpr(a->rd, dest); + + tcg_temp_free(dest); + tcg_temp_free_i64(tmp); + return true; +} + +/* Integer Scalar Move Instruction */ + +static void store_element(TCGv_i64 val, TCGv_ptr base, + int ofs, int sew) +{ + switch (sew) { + case MO_8: + tcg_gen_st8_i64(val, base, ofs); + break; + case MO_16: + tcg_gen_st16_i64(val, base, ofs); + break; + case MO_32: + tcg_gen_st32_i64(val, base, ofs); + break; + case MO_64: + tcg_gen_st_i64(val, base, ofs); + break; + default: + g_assert_not_reached(); + break; + } +} + +/* + * Store vreg[idx] = val. + * The index must be in range of VLMAX. + */ +static void vec_element_storei(DisasContext *s, int vreg, + int idx, TCGv_i64 val) +{ + store_element(val, cpu_env, endian_ofs(s, vreg, idx), s->sew); +} + +/* vmv.s.x vd, rs1 # vd[0] = rs1 */ +static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a) +{ + if (vext_check_isa_ill(s)) { + /* This instruction ignores LMUL and vector register groups */ + int maxsz = s->vlen >> 3; + TCGv_i64 t1; + TCGLabel *over = gen_new_label(); + + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), maxsz, maxsz, 0); + if (a->rs1 == 0) { + goto done; + } + + t1 = tcg_temp_new_i64(); + tcg_gen_extu_tl_i64(t1, cpu_gpr[a->rs1]); + vec_element_storei(s, a->rd, 0, t1); + tcg_temp_free_i64(t1); + done: + gen_set_label(over); + return true; + } + return false; +} + +/* Floating-Point Scalar Move Instructions */ +static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a) +{ + if (!s->vill && has_ext(s, RVF) && + (s->mstatus_fs != 0) && (s->sew != 0)) { + unsigned int len = 8 << s->sew; + + vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0); + if (len < 64) { + tcg_gen_ori_i64(cpu_fpr[a->rd], cpu_fpr[a->rd], + MAKE_64BIT_MASK(len, 64 - len)); + } + + mark_fs_dirty(s); + return true; + } + return false; +} + +/* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */ +static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a) +{ + if (!s->vill && has_ext(s, RVF) && (s->sew != 0)) { + TCGv_i64 t1; + /* The instructions ignore LMUL and vector register group. 
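+ * When vl != 0, all VLEN/8 bytes of vd are zeroed and element 0 is then
+ * written with the NaN-boxed f[rs1], regardless of LMUL.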
*/ + uint32_t vlmax = s->vlen >> 3; + + /* if vl == 0, skip vector register write back */ + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + /* zeroed all elements */ + tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), vlmax, vlmax, 0); + + /* NaN-box f[rs1] as necessary for SEW */ + t1 = tcg_temp_new_i64(); + if (s->sew == MO_64 && !has_ext(s, RVD)) { + tcg_gen_ori_i64(t1, cpu_fpr[a->rs1], MAKE_64BIT_MASK(32, 32)); + } else { + tcg_gen_mov_i64(t1, cpu_fpr[a->rs1]); + } + vec_element_storei(s, a->rd, 0, t1); + tcg_temp_free_i64(t1); + gen_set_label(over); + return true; + } + return false; +} + +/* Vector Slide Instructions */ +static bool slideup_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, true) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, false) && + (a->rd != a->rs2)); +} + +GEN_OPIVX_TRANS(vslideup_vx, slideup_check) +GEN_OPIVX_TRANS(vslide1up_vx, slideup_check) +GEN_OPIVI_TRANS(vslideup_vi, 1, vslideup_vx, slideup_check) + +GEN_OPIVX_TRANS(vslidedown_vx, opivx_check) +GEN_OPIVX_TRANS(vslide1down_vx, opivx_check) +GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, opivx_check) + +/* Vector Register Gather Instruction */ +static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, true) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs1, false) && + vext_check_reg(s, a->rs2, false) && + (a->rd != a->rs2) && (a->rd != a->rs1)); +} + +GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check) + +static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a) +{ + return (vext_check_isa_ill(s) && + vext_check_overlap_mask(s, a->rd, a->vm, true) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, false) && + (a->rd != a->rs2)); +} + +/* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */ +static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a) +{ + if (!vrgather_vx_check(s, a)) { + return false; + } + + if (a->vm && s->vl_eq_vlmax) { + int vlmax = s->vlen / s->mlen; + TCGv_i64 dest = tcg_temp_new_i64(); + + if (a->rs1 == 0) { + vec_element_loadi(s, dest, a->rs2, 0); + } else { + vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax); + } + + tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd), + MAXSZ(s), MAXSZ(s), dest); + tcg_temp_free_i64(dest); + } else { + static gen_helper_opivx * const fns[4] = { + gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h, + gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d + }; + return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); + } + return true; +} + +/* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 
0 : vs2[imm] */ +static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a) +{ + if (!vrgather_vx_check(s, a)) { + return false; + } + + if (a->vm && s->vl_eq_vlmax) { + if (a->rs1 >= s->vlen / s->mlen) { + tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), + MAXSZ(s), MAXSZ(s), 0); + } else { + tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd), + endian_ofs(s, a->rs2, a->rs1), + MAXSZ(s), MAXSZ(s)); + } + } else { + static gen_helper_opivx * const fns[4] = { + gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h, + gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d + }; + return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s, 1); + } + return true; +} + +/* Vector Compress Instruction */ +static bool vcompress_vm_check(DisasContext *s, arg_r *a) +{ + return (vext_check_isa_ill(s) && + vext_check_reg(s, a->rd, false) && + vext_check_reg(s, a->rs2, false) && + vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs1, 1) && + (a->rd != a->rs2)); +} + +static bool trans_vcompress_vm(DisasContext *s, arg_r *a) +{ + if (vcompress_vm_check(s, a)) { + uint32_t data = 0; + static gen_helper_gvec_4_ptr * const fns[4] = { + gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h, + gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d, + }; + TCGLabel *over = gen_new_label(); + tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); + + data = FIELD_DP32(data, VDATA, MLEN, s->mlen); + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); + tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), + vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), + cpu_env, 0, s->vlen / 8, data, fns[s->sew]); + gen_set_label(over); + return true; + } + return false; +} diff --git a/target/riscv/internals.h b/target/riscv/internals.h new file mode 100644 index 0000000..37d3382 --- /dev/null +++ b/target/riscv/internals.h @@ -0,0 +1,41 @@ +/* + * QEMU RISC-V CPU -- internal functions and types + * + * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef RISCV_CPU_INTERNALS_H +#define RISCV_CPU_INTERNALS_H + +#include "hw/registerfields.h" + +/* share data between vector helpers and decode code */ +FIELD(VDATA, MLEN, 0, 8) +FIELD(VDATA, VM, 8, 1) +FIELD(VDATA, LMUL, 9, 2) +FIELD(VDATA, NF, 11, 4) +FIELD(VDATA, WD, 11, 1) + +/* float point classify helpers */ +target_ulong fclass_h(uint64_t frs1); +target_ulong fclass_s(uint64_t frs1); +target_ulong fclass_d(uint64_t frs1); + +#define SEW8 0 +#define SEW16 1 +#define SEW32 2 +#define SEW64 3 + +#endif diff --git a/target/riscv/translate.c b/target/riscv/translate.c index ce71ca7..9632e79 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -32,7 +32,7 @@ #include "instmap.h" /* global register indices */ -static TCGv cpu_gpr[32], cpu_pc; +static TCGv cpu_gpr[32], cpu_pc, cpu_vl; static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */ static TCGv load_res; static TCGv load_val; @@ -56,6 +56,13 @@ typedef struct DisasContext { to reset this known value. */ int frm; bool ext_ifencei; + /* vector extension */ + bool vill; + uint8_t lmul; + uint8_t sew; + uint16_t vlen; + uint16_t mlen; + bool vl_eq_vlmax; } DisasContext; #ifdef TARGET_RISCV64 @@ -542,6 +549,11 @@ static void decode_RV32_64C(DisasContext *ctx, uint16_t opcode) } } +static int ex_plus_1(DisasContext *ctx, int nf) +{ + return nf + 1; +} + #define EX_SH(amount) \ static int ex_shift_##amount(DisasContext *ctx, int imm) \ { \ @@ -712,6 +724,7 @@ static bool gen_shift(DisasContext *ctx, arg_r *a, #include "insn_trans/trans_rvf.inc.c" #include "insn_trans/trans_rvd.inc.c" #include "insn_trans/trans_rvh.inc.c" +#include "insn_trans/trans_rvv.inc.c" #include "insn_trans/trans_privileged.inc.c" /* Include the auto-generated decoder for 16 bit insn */ @@ -746,10 +759,11 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) DisasContext *ctx = container_of(dcbase, DisasContext, base); CPURISCVState *env = cs->env_ptr; RISCVCPU *cpu = RISCV_CPU(cs); + uint32_t tb_flags = ctx->base.tb->flags; ctx->pc_succ_insn = ctx->base.pc_first; - ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK; - ctx->mstatus_fs = ctx->base.tb->flags & TB_FLAGS_MSTATUS_FS; + ctx->mem_idx = tb_flags & TB_FLAGS_MMU_MASK; + ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS; ctx->priv_ver = env->priv_ver; #if !defined(CONFIG_USER_ONLY) if (riscv_has_ext(env, RVH)) { @@ -773,6 +787,12 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->misa = env->misa; ctx->frm = -1; /* unknown rounding mode */ ctx->ext_ifencei = cpu->cfg.ext_ifencei; + ctx->vlen = cpu->cfg.vlen; + ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL); + ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW); + ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL); + ctx->mlen = 1 << (ctx->sew + 3 - ctx->lmul); + ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX); } static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu) @@ -887,6 +907,7 @@ void riscv_translate_init(void) } cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, pc), "pc"); + cpu_vl = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, vl), "vl"); load_res = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_res), "load_res"); load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val), diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c new file mode 100644 index 0000000..39f44d1 --- /dev/null +++ b/target/riscv/vector_helper.c @@ -0,0 +1,4899 @@ +/* + * RISC-V Vector Extension 
Helpers for QEMU. + * + * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/memop.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "fpu/softfloat.h" +#include "tcg/tcg-gvec-desc.h" +#include "internals.h" +#include <math.h> + +target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, + target_ulong s2) +{ + int vlmax, vl; + RISCVCPU *cpu = env_archcpu(env); + uint16_t sew = 8 << FIELD_EX64(s2, VTYPE, VSEW); + uint8_t ediv = FIELD_EX64(s2, VTYPE, VEDIV); + bool vill = FIELD_EX64(s2, VTYPE, VILL); + target_ulong reserved = FIELD_EX64(s2, VTYPE, RESERVED); + + if ((sew > cpu->cfg.elen) || vill || (ediv != 0) || (reserved != 0)) { + /* only set vill bit. */ + env->vtype = FIELD_DP64(0, VTYPE, VILL, 1); + env->vl = 0; + env->vstart = 0; + return 0; + } + + vlmax = vext_get_vlmax(cpu, s2); + if (s1 <= vlmax) { + vl = s1; + } else { + vl = vlmax; + } + env->vl = vl; + env->vtype = s2; + env->vstart = 0; + return vl; +} + +/* + * Note that vector data is stored in host-endian 64-bit chunks, + * so addressing units smaller than that needs a host-endian fixup. + */ +#ifdef HOST_WORDS_BIGENDIAN +#define H1(x) ((x) ^ 7) +#define H1_2(x) ((x) ^ 6) +#define H1_4(x) ((x) ^ 4) +#define H2(x) ((x) ^ 3) +#define H4(x) ((x) ^ 1) +#define H8(x) ((x)) +#else +#define H1(x) (x) +#define H1_2(x) (x) +#define H1_4(x) (x) +#define H2(x) (x) +#define H4(x) (x) +#define H8(x) (x) +#endif + +static inline uint32_t vext_nf(uint32_t desc) +{ + return FIELD_EX32(simd_data(desc), VDATA, NF); +} + +static inline uint32_t vext_mlen(uint32_t desc) +{ + return FIELD_EX32(simd_data(desc), VDATA, MLEN); +} + +static inline uint32_t vext_vm(uint32_t desc) +{ + return FIELD_EX32(simd_data(desc), VDATA, VM); +} + +static inline uint32_t vext_lmul(uint32_t desc) +{ + return FIELD_EX32(simd_data(desc), VDATA, LMUL); +} + +static uint32_t vext_wd(uint32_t desc) +{ + return (simd_data(desc) >> 11) & 0x1; +} + +/* + * Get vector group length in bytes. Its range is [64, 2048]. + * + * As simd_desc support at most 256, the max vlen is 512 bits. + * So vlen in bytes is encoded as maxsz. + */ +static inline uint32_t vext_maxsz(uint32_t desc) +{ + return simd_maxsz(desc) << vext_lmul(desc); +} + +/* + * This function checks watchpoint before real load operation. + * + * In softmmu mode, the TLB API probe_access is enough for watchpoint check. + * In user mode, there is no watchpoint support now. + * + * It will trigger an exception if there is no mapping in TLB + * and page table walk can't fill the TLB entry. Then the guest + * software can return here after process the exception or never return. 
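+ *
+ * Accesses that cross a page boundary are probed in two steps: e.g. with
+ * 4 KiB pages, addr = 0xffe and len = 8 probe [0xffe, 0x1000) on the
+ * first page and [0x1000, 0x1006) on the second.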
+ */ +static void probe_pages(CPURISCVState *env, target_ulong addr, + target_ulong len, uintptr_t ra, + MMUAccessType access_type) +{ + target_ulong pagelen = -(addr | TARGET_PAGE_MASK); + target_ulong curlen = MIN(pagelen, len); + + probe_access(env, addr, curlen, access_type, + cpu_mmu_index(env, false), ra); + if (len > curlen) { + addr += curlen; + curlen = len - curlen; + probe_access(env, addr, curlen, access_type, + cpu_mmu_index(env, false), ra); + } +} + +#ifdef HOST_WORDS_BIGENDIAN +static void vext_clear(void *tail, uint32_t cnt, uint32_t tot) +{ + /* + * Split the remaining range to two parts. + * The first part is in the last uint64_t unit. + * The second part start from the next uint64_t unit. + */ + int part1 = 0, part2 = tot - cnt; + if (cnt % 8) { + part1 = 8 - (cnt % 8); + part2 = tot - cnt - part1; + memset((void *)((uintptr_t)tail & ~(7ULL)), 0, part1); + memset((void *)(((uintptr_t)tail + 8) & ~(7ULL)), 0, part2); + } else { + memset(tail, 0, part2); + } +} +#else +static void vext_clear(void *tail, uint32_t cnt, uint32_t tot) +{ + memset(tail, 0, tot - cnt); +} +#endif + +static void clearb(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot) +{ + int8_t *cur = ((int8_t *)vd + H1(idx)); + vext_clear(cur, cnt, tot); +} + +static void clearh(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot) +{ + int16_t *cur = ((int16_t *)vd + H2(idx)); + vext_clear(cur, cnt, tot); +} + +static void clearl(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot) +{ + int32_t *cur = ((int32_t *)vd + H4(idx)); + vext_clear(cur, cnt, tot); +} + +static void clearq(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot) +{ + int64_t *cur = (int64_t *)vd + idx; + vext_clear(cur, cnt, tot); +} + +static inline void vext_set_elem_mask(void *v0, int mlen, int index, + uint8_t value) +{ + int idx = (index * mlen) / 64; + int pos = (index * mlen) % 64; + uint64_t old = ((uint64_t *)v0)[idx]; + ((uint64_t *)v0)[idx] = deposit64(old, pos, mlen, value); +} + +static inline int vext_elem_mask(void *v0, int mlen, int index) +{ + int idx = (index * mlen) / 64; + int pos = (index * mlen) % 64; + return (((uint64_t *)v0)[idx] >> pos) & 1; +} + +/* elements operations for load and store */ +typedef void vext_ldst_elem_fn(CPURISCVState *env, target_ulong addr, + uint32_t idx, void *vd, uintptr_t retaddr); +typedef void clear_fn(void *vd, uint32_t idx, uint32_t cnt, uint32_t tot); + +#define GEN_VEXT_LD_ELEM(NAME, MTYPE, ETYPE, H, LDSUF) \ +static void NAME(CPURISCVState *env, abi_ptr addr, \ + uint32_t idx, void *vd, uintptr_t retaddr)\ +{ \ + MTYPE data; \ + ETYPE *cur = ((ETYPE *)vd + H(idx)); \ + data = cpu_##LDSUF##_data_ra(env, addr, retaddr); \ + *cur = data; \ +} \ + +GEN_VEXT_LD_ELEM(ldb_b, int8_t, int8_t, H1, ldsb) +GEN_VEXT_LD_ELEM(ldb_h, int8_t, int16_t, H2, ldsb) +GEN_VEXT_LD_ELEM(ldb_w, int8_t, int32_t, H4, ldsb) +GEN_VEXT_LD_ELEM(ldb_d, int8_t, int64_t, H8, ldsb) +GEN_VEXT_LD_ELEM(ldh_h, int16_t, int16_t, H2, ldsw) +GEN_VEXT_LD_ELEM(ldh_w, int16_t, int32_t, H4, ldsw) +GEN_VEXT_LD_ELEM(ldh_d, int16_t, int64_t, H8, ldsw) +GEN_VEXT_LD_ELEM(ldw_w, int32_t, int32_t, H4, ldl) +GEN_VEXT_LD_ELEM(ldw_d, int32_t, int64_t, H8, ldl) +GEN_VEXT_LD_ELEM(lde_b, int8_t, int8_t, H1, ldsb) +GEN_VEXT_LD_ELEM(lde_h, int16_t, int16_t, H2, ldsw) +GEN_VEXT_LD_ELEM(lde_w, int32_t, int32_t, H4, ldl) +GEN_VEXT_LD_ELEM(lde_d, int64_t, int64_t, H8, ldq) +GEN_VEXT_LD_ELEM(ldbu_b, uint8_t, uint8_t, H1, ldub) +GEN_VEXT_LD_ELEM(ldbu_h, uint8_t, uint16_t, H2, ldub) +GEN_VEXT_LD_ELEM(ldbu_w, uint8_t, uint32_t, H4, ldub) 
+GEN_VEXT_LD_ELEM(ldbu_d, uint8_t, uint64_t, H8, ldub) +GEN_VEXT_LD_ELEM(ldhu_h, uint16_t, uint16_t, H2, lduw) +GEN_VEXT_LD_ELEM(ldhu_w, uint16_t, uint32_t, H4, lduw) +GEN_VEXT_LD_ELEM(ldhu_d, uint16_t, uint64_t, H8, lduw) +GEN_VEXT_LD_ELEM(ldwu_w, uint32_t, uint32_t, H4, ldl) +GEN_VEXT_LD_ELEM(ldwu_d, uint32_t, uint64_t, H8, ldl) + +#define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \ +static void NAME(CPURISCVState *env, abi_ptr addr, \ + uint32_t idx, void *vd, uintptr_t retaddr)\ +{ \ + ETYPE data = *((ETYPE *)vd + H(idx)); \ + cpu_##STSUF##_data_ra(env, addr, data, retaddr); \ +} + +GEN_VEXT_ST_ELEM(stb_b, int8_t, H1, stb) +GEN_VEXT_ST_ELEM(stb_h, int16_t, H2, stb) +GEN_VEXT_ST_ELEM(stb_w, int32_t, H4, stb) +GEN_VEXT_ST_ELEM(stb_d, int64_t, H8, stb) +GEN_VEXT_ST_ELEM(sth_h, int16_t, H2, stw) +GEN_VEXT_ST_ELEM(sth_w, int32_t, H4, stw) +GEN_VEXT_ST_ELEM(sth_d, int64_t, H8, stw) +GEN_VEXT_ST_ELEM(stw_w, int32_t, H4, stl) +GEN_VEXT_ST_ELEM(stw_d, int64_t, H8, stl) +GEN_VEXT_ST_ELEM(ste_b, int8_t, H1, stb) +GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw) +GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl) +GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq) + +/* + *** stride: access vector element from strided memory + */ +static void +vext_ldst_stride(void *vd, void *v0, target_ulong base, + target_ulong stride, CPURISCVState *env, + uint32_t desc, uint32_t vm, + vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem, + uint32_t esz, uint32_t msz, uintptr_t ra, + MMUAccessType access_type) +{ + uint32_t i, k; + uint32_t nf = vext_nf(desc); + uint32_t mlen = vext_mlen(desc); + uint32_t vlmax = vext_maxsz(desc) / esz; + + /* probe every access*/ + for (i = 0; i < env->vl; i++) { + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + probe_pages(env, base + stride * i, nf * msz, ra, access_type); + } + /* do real access */ + for (i = 0; i < env->vl; i++) { + k = 0; + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + while (k < nf) { + target_ulong addr = base + stride * i + k * msz; + ldst_elem(env, addr, i + k * vlmax, vd, ra); + k++; + } + } + /* clear tail elements */ + if (clear_elem) { + for (k = 0; k < nf; k++) { + clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz); + } + } +} + +#define GEN_VEXT_LD_STRIDE(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void * v0, target_ulong base, \ + target_ulong stride, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + uint32_t vm = vext_vm(desc); \ + vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN, \ + CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \ + GETPC(), MMU_DATA_LOAD); \ +} + +GEN_VEXT_LD_STRIDE(vlsb_v_b, int8_t, int8_t, ldb_b, clearb) +GEN_VEXT_LD_STRIDE(vlsb_v_h, int8_t, int16_t, ldb_h, clearh) +GEN_VEXT_LD_STRIDE(vlsb_v_w, int8_t, int32_t, ldb_w, clearl) +GEN_VEXT_LD_STRIDE(vlsb_v_d, int8_t, int64_t, ldb_d, clearq) +GEN_VEXT_LD_STRIDE(vlsh_v_h, int16_t, int16_t, ldh_h, clearh) +GEN_VEXT_LD_STRIDE(vlsh_v_w, int16_t, int32_t, ldh_w, clearl) +GEN_VEXT_LD_STRIDE(vlsh_v_d, int16_t, int64_t, ldh_d, clearq) +GEN_VEXT_LD_STRIDE(vlsw_v_w, int32_t, int32_t, ldw_w, clearl) +GEN_VEXT_LD_STRIDE(vlsw_v_d, int32_t, int64_t, ldw_d, clearq) +GEN_VEXT_LD_STRIDE(vlse_v_b, int8_t, int8_t, lde_b, clearb) +GEN_VEXT_LD_STRIDE(vlse_v_h, int16_t, int16_t, lde_h, clearh) +GEN_VEXT_LD_STRIDE(vlse_v_w, int32_t, int32_t, lde_w, clearl) +GEN_VEXT_LD_STRIDE(vlse_v_d, int64_t, int64_t, lde_d, clearq) +GEN_VEXT_LD_STRIDE(vlsbu_v_b, uint8_t, uint8_t, ldbu_b, clearb) +GEN_VEXT_LD_STRIDE(vlsbu_v_h, uint8_t, uint16_t, ldbu_h, clearh) 
+GEN_VEXT_LD_STRIDE(vlsbu_v_w, uint8_t, uint32_t, ldbu_w, clearl) +GEN_VEXT_LD_STRIDE(vlsbu_v_d, uint8_t, uint64_t, ldbu_d, clearq) +GEN_VEXT_LD_STRIDE(vlshu_v_h, uint16_t, uint16_t, ldhu_h, clearh) +GEN_VEXT_LD_STRIDE(vlshu_v_w, uint16_t, uint32_t, ldhu_w, clearl) +GEN_VEXT_LD_STRIDE(vlshu_v_d, uint16_t, uint64_t, ldhu_d, clearq) +GEN_VEXT_LD_STRIDE(vlswu_v_w, uint32_t, uint32_t, ldwu_w, clearl) +GEN_VEXT_LD_STRIDE(vlswu_v_d, uint32_t, uint64_t, ldwu_d, clearq) + +#define GEN_VEXT_ST_STRIDE(NAME, MTYPE, ETYPE, STORE_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ + target_ulong stride, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + uint32_t vm = vext_vm(desc); \ + vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN, \ + NULL, sizeof(ETYPE), sizeof(MTYPE), \ + GETPC(), MMU_DATA_STORE); \ +} + +GEN_VEXT_ST_STRIDE(vssb_v_b, int8_t, int8_t, stb_b) +GEN_VEXT_ST_STRIDE(vssb_v_h, int8_t, int16_t, stb_h) +GEN_VEXT_ST_STRIDE(vssb_v_w, int8_t, int32_t, stb_w) +GEN_VEXT_ST_STRIDE(vssb_v_d, int8_t, int64_t, stb_d) +GEN_VEXT_ST_STRIDE(vssh_v_h, int16_t, int16_t, sth_h) +GEN_VEXT_ST_STRIDE(vssh_v_w, int16_t, int32_t, sth_w) +GEN_VEXT_ST_STRIDE(vssh_v_d, int16_t, int64_t, sth_d) +GEN_VEXT_ST_STRIDE(vssw_v_w, int32_t, int32_t, stw_w) +GEN_VEXT_ST_STRIDE(vssw_v_d, int32_t, int64_t, stw_d) +GEN_VEXT_ST_STRIDE(vsse_v_b, int8_t, int8_t, ste_b) +GEN_VEXT_ST_STRIDE(vsse_v_h, int16_t, int16_t, ste_h) +GEN_VEXT_ST_STRIDE(vsse_v_w, int32_t, int32_t, ste_w) +GEN_VEXT_ST_STRIDE(vsse_v_d, int64_t, int64_t, ste_d) + +/* + *** unit-stride: access elements stored contiguously in memory + */ + +/* unmasked unit-stride load and store operation*/ +static void +vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc, + vext_ldst_elem_fn *ldst_elem, clear_fn *clear_elem, + uint32_t esz, uint32_t msz, uintptr_t ra, + MMUAccessType access_type) +{ + uint32_t i, k; + uint32_t nf = vext_nf(desc); + uint32_t vlmax = vext_maxsz(desc) / esz; + + /* probe every access */ + probe_pages(env, base, env->vl * nf * msz, ra, access_type); + /* load bytes from guest memory */ + for (i = 0; i < env->vl; i++) { + k = 0; + while (k < nf) { + target_ulong addr = base + (i * nf + k) * msz; + ldst_elem(env, addr, i + k * vlmax, vd, ra); + k++; + } + } + /* clear tail elements */ + if (clear_elem) { + for (k = 0; k < nf; k++) { + clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz); + } + } +} + +/* + * masked unit-stride load and store operation will be a special case of stride, + * stride = NF * sizeof (MTYPE) + */ + +#define GEN_VEXT_LD_US(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \ +void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t stride = vext_nf(desc) * sizeof(MTYPE); \ + vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \ + CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \ + GETPC(), MMU_DATA_LOAD); \ +} \ + \ +void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vext_ldst_us(vd, base, env, desc, LOAD_FN, CLEAR_FN, \ + sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_LOAD); \ +} + +GEN_VEXT_LD_US(vlb_v_b, int8_t, int8_t, ldb_b, clearb) +GEN_VEXT_LD_US(vlb_v_h, int8_t, int16_t, ldb_h, clearh) +GEN_VEXT_LD_US(vlb_v_w, int8_t, int32_t, ldb_w, clearl) +GEN_VEXT_LD_US(vlb_v_d, int8_t, int64_t, ldb_d, clearq) +GEN_VEXT_LD_US(vlh_v_h, int16_t, int16_t, ldh_h, clearh) +GEN_VEXT_LD_US(vlh_v_w, int16_t, int32_t, ldh_w, clearl) +GEN_VEXT_LD_US(vlh_v_d, int16_t, 
int64_t, ldh_d, clearq) +GEN_VEXT_LD_US(vlw_v_w, int32_t, int32_t, ldw_w, clearl) +GEN_VEXT_LD_US(vlw_v_d, int32_t, int64_t, ldw_d, clearq) +GEN_VEXT_LD_US(vle_v_b, int8_t, int8_t, lde_b, clearb) +GEN_VEXT_LD_US(vle_v_h, int16_t, int16_t, lde_h, clearh) +GEN_VEXT_LD_US(vle_v_w, int32_t, int32_t, lde_w, clearl) +GEN_VEXT_LD_US(vle_v_d, int64_t, int64_t, lde_d, clearq) +GEN_VEXT_LD_US(vlbu_v_b, uint8_t, uint8_t, ldbu_b, clearb) +GEN_VEXT_LD_US(vlbu_v_h, uint8_t, uint16_t, ldbu_h, clearh) +GEN_VEXT_LD_US(vlbu_v_w, uint8_t, uint32_t, ldbu_w, clearl) +GEN_VEXT_LD_US(vlbu_v_d, uint8_t, uint64_t, ldbu_d, clearq) +GEN_VEXT_LD_US(vlhu_v_h, uint16_t, uint16_t, ldhu_h, clearh) +GEN_VEXT_LD_US(vlhu_v_w, uint16_t, uint32_t, ldhu_w, clearl) +GEN_VEXT_LD_US(vlhu_v_d, uint16_t, uint64_t, ldhu_d, clearq) +GEN_VEXT_LD_US(vlwu_v_w, uint32_t, uint32_t, ldwu_w, clearl) +GEN_VEXT_LD_US(vlwu_v_d, uint32_t, uint64_t, ldwu_d, clearq) + +#define GEN_VEXT_ST_US(NAME, MTYPE, ETYPE, STORE_FN) \ +void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t stride = vext_nf(desc) * sizeof(MTYPE); \ + vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \ + NULL, sizeof(ETYPE), sizeof(MTYPE), \ + GETPC(), MMU_DATA_STORE); \ +} \ + \ +void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vext_ldst_us(vd, base, env, desc, STORE_FN, NULL, \ + sizeof(ETYPE), sizeof(MTYPE), GETPC(), MMU_DATA_STORE);\ +} + +GEN_VEXT_ST_US(vsb_v_b, int8_t, int8_t , stb_b) +GEN_VEXT_ST_US(vsb_v_h, int8_t, int16_t, stb_h) +GEN_VEXT_ST_US(vsb_v_w, int8_t, int32_t, stb_w) +GEN_VEXT_ST_US(vsb_v_d, int8_t, int64_t, stb_d) +GEN_VEXT_ST_US(vsh_v_h, int16_t, int16_t, sth_h) +GEN_VEXT_ST_US(vsh_v_w, int16_t, int32_t, sth_w) +GEN_VEXT_ST_US(vsh_v_d, int16_t, int64_t, sth_d) +GEN_VEXT_ST_US(vsw_v_w, int32_t, int32_t, stw_w) +GEN_VEXT_ST_US(vsw_v_d, int32_t, int64_t, stw_d) +GEN_VEXT_ST_US(vse_v_b, int8_t, int8_t , ste_b) +GEN_VEXT_ST_US(vse_v_h, int16_t, int16_t, ste_h) +GEN_VEXT_ST_US(vse_v_w, int32_t, int32_t, ste_w) +GEN_VEXT_ST_US(vse_v_d, int64_t, int64_t, ste_d) + +/* + *** index: access vector element from indexed memory + */ +typedef target_ulong vext_get_index_addr(target_ulong base, + uint32_t idx, void *vs2); + +#define GEN_VEXT_GET_INDEX_ADDR(NAME, ETYPE, H) \ +static target_ulong NAME(target_ulong base, \ + uint32_t idx, void *vs2) \ +{ \ + return (base + *((ETYPE *)vs2 + H(idx))); \ +} + +GEN_VEXT_GET_INDEX_ADDR(idx_b, int8_t, H1) +GEN_VEXT_GET_INDEX_ADDR(idx_h, int16_t, H2) +GEN_VEXT_GET_INDEX_ADDR(idx_w, int32_t, H4) +GEN_VEXT_GET_INDEX_ADDR(idx_d, int64_t, H8) + +static inline void +vext_ldst_index(void *vd, void *v0, target_ulong base, + void *vs2, CPURISCVState *env, uint32_t desc, + vext_get_index_addr get_index_addr, + vext_ldst_elem_fn *ldst_elem, + clear_fn *clear_elem, + uint32_t esz, uint32_t msz, uintptr_t ra, + MMUAccessType access_type) +{ + uint32_t i, k; + uint32_t nf = vext_nf(desc); + uint32_t vm = vext_vm(desc); + uint32_t mlen = vext_mlen(desc); + uint32_t vlmax = vext_maxsz(desc) / esz; + + /* probe every access*/ + for (i = 0; i < env->vl; i++) { + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + probe_pages(env, get_index_addr(base, i, vs2), nf * msz, ra, + access_type); + } + /* load bytes from guest memory */ + for (i = 0; i < env->vl; i++) { + k = 0; + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + while (k < nf) { + abi_ptr addr = get_index_addr(base, i, vs2) + k * 
msz; + ldst_elem(env, addr, i + k * vlmax, vd, ra); + k++; + } + } + /* clear tail elements */ + if (clear_elem) { + for (k = 0; k < nf; k++) { + clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz); + } + } +} + +#define GEN_VEXT_LD_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, LOAD_FN, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ + void *vs2, CPURISCVState *env, uint32_t desc) \ +{ \ + vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \ + LOAD_FN, CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \ + GETPC(), MMU_DATA_LOAD); \ +} + +GEN_VEXT_LD_INDEX(vlxb_v_b, int8_t, int8_t, idx_b, ldb_b, clearb) +GEN_VEXT_LD_INDEX(vlxb_v_h, int8_t, int16_t, idx_h, ldb_h, clearh) +GEN_VEXT_LD_INDEX(vlxb_v_w, int8_t, int32_t, idx_w, ldb_w, clearl) +GEN_VEXT_LD_INDEX(vlxb_v_d, int8_t, int64_t, idx_d, ldb_d, clearq) +GEN_VEXT_LD_INDEX(vlxh_v_h, int16_t, int16_t, idx_h, ldh_h, clearh) +GEN_VEXT_LD_INDEX(vlxh_v_w, int16_t, int32_t, idx_w, ldh_w, clearl) +GEN_VEXT_LD_INDEX(vlxh_v_d, int16_t, int64_t, idx_d, ldh_d, clearq) +GEN_VEXT_LD_INDEX(vlxw_v_w, int32_t, int32_t, idx_w, ldw_w, clearl) +GEN_VEXT_LD_INDEX(vlxw_v_d, int32_t, int64_t, idx_d, ldw_d, clearq) +GEN_VEXT_LD_INDEX(vlxe_v_b, int8_t, int8_t, idx_b, lde_b, clearb) +GEN_VEXT_LD_INDEX(vlxe_v_h, int16_t, int16_t, idx_h, lde_h, clearh) +GEN_VEXT_LD_INDEX(vlxe_v_w, int32_t, int32_t, idx_w, lde_w, clearl) +GEN_VEXT_LD_INDEX(vlxe_v_d, int64_t, int64_t, idx_d, lde_d, clearq) +GEN_VEXT_LD_INDEX(vlxbu_v_b, uint8_t, uint8_t, idx_b, ldbu_b, clearb) +GEN_VEXT_LD_INDEX(vlxbu_v_h, uint8_t, uint16_t, idx_h, ldbu_h, clearh) +GEN_VEXT_LD_INDEX(vlxbu_v_w, uint8_t, uint32_t, idx_w, ldbu_w, clearl) +GEN_VEXT_LD_INDEX(vlxbu_v_d, uint8_t, uint64_t, idx_d, ldbu_d, clearq) +GEN_VEXT_LD_INDEX(vlxhu_v_h, uint16_t, uint16_t, idx_h, ldhu_h, clearh) +GEN_VEXT_LD_INDEX(vlxhu_v_w, uint16_t, uint32_t, idx_w, ldhu_w, clearl) +GEN_VEXT_LD_INDEX(vlxhu_v_d, uint16_t, uint64_t, idx_d, ldhu_d, clearq) +GEN_VEXT_LD_INDEX(vlxwu_v_w, uint32_t, uint32_t, idx_w, ldwu_w, clearl) +GEN_VEXT_LD_INDEX(vlxwu_v_d, uint32_t, uint64_t, idx_d, ldwu_d, clearq) + +#define GEN_VEXT_ST_INDEX(NAME, MTYPE, ETYPE, INDEX_FN, STORE_FN)\ +void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ + void *vs2, CPURISCVState *env, uint32_t desc) \ +{ \ + vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \ + STORE_FN, NULL, sizeof(ETYPE), sizeof(MTYPE),\ + GETPC(), MMU_DATA_STORE); \ +} + +GEN_VEXT_ST_INDEX(vsxb_v_b, int8_t, int8_t, idx_b, stb_b) +GEN_VEXT_ST_INDEX(vsxb_v_h, int8_t, int16_t, idx_h, stb_h) +GEN_VEXT_ST_INDEX(vsxb_v_w, int8_t, int32_t, idx_w, stb_w) +GEN_VEXT_ST_INDEX(vsxb_v_d, int8_t, int64_t, idx_d, stb_d) +GEN_VEXT_ST_INDEX(vsxh_v_h, int16_t, int16_t, idx_h, sth_h) +GEN_VEXT_ST_INDEX(vsxh_v_w, int16_t, int32_t, idx_w, sth_w) +GEN_VEXT_ST_INDEX(vsxh_v_d, int16_t, int64_t, idx_d, sth_d) +GEN_VEXT_ST_INDEX(vsxw_v_w, int32_t, int32_t, idx_w, stw_w) +GEN_VEXT_ST_INDEX(vsxw_v_d, int32_t, int64_t, idx_d, stw_d) +GEN_VEXT_ST_INDEX(vsxe_v_b, int8_t, int8_t, idx_b, ste_b) +GEN_VEXT_ST_INDEX(vsxe_v_h, int16_t, int16_t, idx_h, ste_h) +GEN_VEXT_ST_INDEX(vsxe_v_w, int32_t, int32_t, idx_w, ste_w) +GEN_VEXT_ST_INDEX(vsxe_v_d, int64_t, int64_t, idx_d, ste_d) + +/* + *** unit-stride fault-only-fisrt load instructions + */ +static inline void +vext_ldff(void *vd, void *v0, target_ulong base, + CPURISCVState *env, uint32_t desc, + vext_ldst_elem_fn *ldst_elem, + clear_fn *clear_elem, + uint32_t esz, uint32_t msz, uintptr_t ra) +{ + void *host; + uint32_t i, k, vl = 0; + uint32_t mlen = 
vext_mlen(desc); + uint32_t nf = vext_nf(desc); + uint32_t vm = vext_vm(desc); + uint32_t vlmax = vext_maxsz(desc) / esz; + target_ulong addr, offset, remain; + + /* probe every access*/ + for (i = 0; i < env->vl; i++) { + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + addr = base + nf * i * msz; + if (i == 0) { + probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD); + } else { + /* if it triggers an exception, no need to check watchpoint */ + remain = nf * msz; + while (remain > 0) { + offset = -(addr | TARGET_PAGE_MASK); + host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, + cpu_mmu_index(env, false)); + if (host) { +#ifdef CONFIG_USER_ONLY + if (page_check_range(addr, nf * msz, PAGE_READ) < 0) { + vl = i; + goto ProbeSuccess; + } +#else + probe_pages(env, addr, nf * msz, ra, MMU_DATA_LOAD); +#endif + } else { + vl = i; + goto ProbeSuccess; + } + if (remain <= offset) { + break; + } + remain -= offset; + addr += offset; + } + } + } +ProbeSuccess: + /* load bytes from guest memory */ + if (vl != 0) { + env->vl = vl; + } + for (i = 0; i < env->vl; i++) { + k = 0; + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + while (k < nf) { + target_ulong addr = base + (i * nf + k) * msz; + ldst_elem(env, addr, i + k * vlmax, vd, ra); + k++; + } + } + /* clear tail elements */ + if (vl != 0) { + return; + } + for (k = 0; k < nf; k++) { + clear_elem(vd, env->vl + k * vlmax, env->vl * esz, vlmax * esz); + } +} + +#define GEN_VEXT_LDFF(NAME, MTYPE, ETYPE, LOAD_FN, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong base, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vext_ldff(vd, v0, base, env, desc, LOAD_FN, CLEAR_FN, \ + sizeof(ETYPE), sizeof(MTYPE), GETPC()); \ +} + +GEN_VEXT_LDFF(vlbff_v_b, int8_t, int8_t, ldb_b, clearb) +GEN_VEXT_LDFF(vlbff_v_h, int8_t, int16_t, ldb_h, clearh) +GEN_VEXT_LDFF(vlbff_v_w, int8_t, int32_t, ldb_w, clearl) +GEN_VEXT_LDFF(vlbff_v_d, int8_t, int64_t, ldb_d, clearq) +GEN_VEXT_LDFF(vlhff_v_h, int16_t, int16_t, ldh_h, clearh) +GEN_VEXT_LDFF(vlhff_v_w, int16_t, int32_t, ldh_w, clearl) +GEN_VEXT_LDFF(vlhff_v_d, int16_t, int64_t, ldh_d, clearq) +GEN_VEXT_LDFF(vlwff_v_w, int32_t, int32_t, ldw_w, clearl) +GEN_VEXT_LDFF(vlwff_v_d, int32_t, int64_t, ldw_d, clearq) +GEN_VEXT_LDFF(vleff_v_b, int8_t, int8_t, lde_b, clearb) +GEN_VEXT_LDFF(vleff_v_h, int16_t, int16_t, lde_h, clearh) +GEN_VEXT_LDFF(vleff_v_w, int32_t, int32_t, lde_w, clearl) +GEN_VEXT_LDFF(vleff_v_d, int64_t, int64_t, lde_d, clearq) +GEN_VEXT_LDFF(vlbuff_v_b, uint8_t, uint8_t, ldbu_b, clearb) +GEN_VEXT_LDFF(vlbuff_v_h, uint8_t, uint16_t, ldbu_h, clearh) +GEN_VEXT_LDFF(vlbuff_v_w, uint8_t, uint32_t, ldbu_w, clearl) +GEN_VEXT_LDFF(vlbuff_v_d, uint8_t, uint64_t, ldbu_d, clearq) +GEN_VEXT_LDFF(vlhuff_v_h, uint16_t, uint16_t, ldhu_h, clearh) +GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, ldhu_w, clearl) +GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, ldhu_d, clearq) +GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, ldwu_w, clearl) +GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, ldwu_d, clearq) + +/* + *** Vector AMO Operations (Zvamo) + */ +typedef void vext_amo_noatomic_fn(void *vs3, target_ulong addr, + uint32_t wd, uint32_t idx, CPURISCVState *env, + uintptr_t retaddr); + +/* no atomic opreation for vector atomic insructions */ +#define DO_SWAP(N, M) (M) +#define DO_AND(N, M) (N & M) +#define DO_XOR(N, M) (N ^ M) +#define DO_OR(N, M) (N | M) +#define DO_ADD(N, M) (N + M) + +#define GEN_VEXT_AMO_NOATOMIC_OP(NAME, ESZ, MSZ, H, DO_OP, SUF) \ +static void \ +vext_##NAME##_noatomic_op(void *vs3, 
target_ulong addr, \ + uint32_t wd, uint32_t idx, \ + CPURISCVState *env, uintptr_t retaddr)\ +{ \ + typedef int##ESZ##_t ETYPE; \ + typedef int##MSZ##_t MTYPE; \ + typedef uint##MSZ##_t UMTYPE __attribute__((unused)); \ + ETYPE *pe3 = (ETYPE *)vs3 + H(idx); \ + MTYPE a = cpu_ld##SUF##_data(env, addr), b = *pe3; \ + \ + cpu_st##SUF##_data(env, addr, DO_OP(a, b)); \ + if (wd) { \ + *pe3 = a; \ + } \ +} + +/* Signed min/max */ +#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M)) +#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N)) + +/* Unsigned min/max */ +#define DO_MAXU(N, M) DO_MAX((UMTYPE)N, (UMTYPE)M) +#define DO_MINU(N, M) DO_MIN((UMTYPE)N, (UMTYPE)M) + +GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_w, 32, 32, H4, DO_SWAP, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_w, 32, 32, H4, DO_ADD, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_w, 32, 32, H4, DO_XOR, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_w, 32, 32, H4, DO_AND, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_w, 32, 32, H4, DO_OR, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_w, 32, 32, H4, DO_MIN, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_w, 32, 32, H4, DO_MAX, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_w, 32, 32, H4, DO_MINU, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_w, 32, 32, H4, DO_MAXU, l) +#ifdef TARGET_RISCV64 +GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_d, 64, 32, H8, DO_SWAP, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamoswapd_v_d, 64, 64, H8, DO_SWAP, q) +GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_d, 64, 32, H8, DO_ADD, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamoaddd_v_d, 64, 64, H8, DO_ADD, q) +GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_d, 64, 32, H8, DO_XOR, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamoxord_v_d, 64, 64, H8, DO_XOR, q) +GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_d, 64, 32, H8, DO_AND, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamoandd_v_d, 64, 64, H8, DO_AND, q) +GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_d, 64, 32, H8, DO_OR, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamoord_v_d, 64, 64, H8, DO_OR, q) +GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_d, 64, 32, H8, DO_MIN, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamomind_v_d, 64, 64, H8, DO_MIN, q) +GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_d, 64, 32, H8, DO_MAX, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamomaxd_v_d, 64, 64, H8, DO_MAX, q) +GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_d, 64, 32, H8, DO_MINU, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamominud_v_d, 64, 64, H8, DO_MINU, q) +GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_d, 64, 32, H8, DO_MAXU, l) +GEN_VEXT_AMO_NOATOMIC_OP(vamomaxud_v_d, 64, 64, H8, DO_MAXU, q) +#endif + +static inline void +vext_amo_noatomic(void *vs3, void *v0, target_ulong base, + void *vs2, CPURISCVState *env, uint32_t desc, + vext_get_index_addr get_index_addr, + vext_amo_noatomic_fn *noatomic_op, + clear_fn *clear_elem, + uint32_t esz, uint32_t msz, uintptr_t ra) +{ + uint32_t i; + target_long addr; + uint32_t wd = vext_wd(desc); + uint32_t vm = vext_vm(desc); + uint32_t mlen = vext_mlen(desc); + uint32_t vlmax = vext_maxsz(desc) / esz; + + for (i = 0; i < env->vl; i++) { + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_LOAD); + probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_STORE); + } + for (i = 0; i < env->vl; i++) { + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + addr = get_index_addr(base, i, vs2); + noatomic_op(vs3, addr, wd, i, env, ra); + } + clear_elem(vs3, env->vl, env->vl * esz, vlmax * esz); +} + +#define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN, CLEAR_FN) \ +void HELPER(NAME)(void *vs3, void *v0, target_ulong base, \ + void *vs2, CPURISCVState *env, uint32_t desc) \ +{ \ + vext_amo_noatomic(vs3, v0, base, 
vs2, env, desc, \ + INDEX_FN, vext_##NAME##_noatomic_op, \ + CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \ + GETPC()); \ +} + +#ifdef TARGET_RISCV64 +GEN_VEXT_AMO(vamoswapw_v_d, int32_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamoswapd_v_d, int64_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamoaddw_v_d, int32_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamoaddd_v_d, int64_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamoxorw_v_d, int32_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamoxord_v_d, int64_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamoandw_v_d, int32_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamoandd_v_d, int64_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamoorw_v_d, int32_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamoord_v_d, int64_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamominw_v_d, int32_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamomind_v_d, int64_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamomaxw_v_d, int32_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamomaxd_v_d, int64_t, int64_t, idx_d, clearq) +GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d, clearq) +GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d, clearq) +GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d, clearq) +GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d, clearq) +#endif +GEN_VEXT_AMO(vamoswapw_v_w, int32_t, int32_t, idx_w, clearl) +GEN_VEXT_AMO(vamoaddw_v_w, int32_t, int32_t, idx_w, clearl) +GEN_VEXT_AMO(vamoxorw_v_w, int32_t, int32_t, idx_w, clearl) +GEN_VEXT_AMO(vamoandw_v_w, int32_t, int32_t, idx_w, clearl) +GEN_VEXT_AMO(vamoorw_v_w, int32_t, int32_t, idx_w, clearl) +GEN_VEXT_AMO(vamominw_v_w, int32_t, int32_t, idx_w, clearl) +GEN_VEXT_AMO(vamomaxw_v_w, int32_t, int32_t, idx_w, clearl) +GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w, clearl) +GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w, clearl) + +/* + *** Vector Integer Arithmetic Instructions + */ + +/* expand macro args before macro */ +#define RVVCALL(macro, ...) 
macro(__VA_ARGS__) + +/* (TD, T1, T2, TX1, TX2) */ +#define OP_SSS_B int8_t, int8_t, int8_t, int8_t, int8_t +#define OP_SSS_H int16_t, int16_t, int16_t, int16_t, int16_t +#define OP_SSS_W int32_t, int32_t, int32_t, int32_t, int32_t +#define OP_SSS_D int64_t, int64_t, int64_t, int64_t, int64_t +#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t +#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t +#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t +#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t +#define OP_SUS_B int8_t, uint8_t, int8_t, uint8_t, int8_t +#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t +#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t +#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t +#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t +#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t +#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t +#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t +#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t +#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t +#define WOP_SUS_B int16_t, uint8_t, int8_t, uint16_t, int16_t +#define WOP_SUS_H int32_t, uint16_t, int16_t, uint32_t, int32_t +#define WOP_SUS_W int64_t, uint32_t, int32_t, uint64_t, int64_t +#define WOP_SSU_B int16_t, int8_t, uint8_t, int16_t, uint16_t +#define WOP_SSU_H int32_t, int16_t, uint16_t, int32_t, uint32_t +#define WOP_SSU_W int64_t, int32_t, uint32_t, int64_t, uint64_t +#define NOP_SSS_B int8_t, int8_t, int16_t, int8_t, int16_t +#define NOP_SSS_H int16_t, int16_t, int32_t, int16_t, int32_t +#define NOP_SSS_W int32_t, int32_t, int64_t, int32_t, int64_t +#define NOP_UUU_B uint8_t, uint8_t, uint16_t, uint8_t, uint16_t +#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t +#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t + +/* operation of two vector elements */ +typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i); + +#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ +static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \ +{ \ + TX1 s1 = *((T1 *)vs1 + HS1(i)); \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + *((TD *)vd + HD(i)) = OP(s2, s1); \ +} +#define DO_SUB(N, M) (N - M) +#define DO_RSUB(N, M) (M - N) + +RVVCALL(OPIVV2, vadd_vv_b, OP_SSS_B, H1, H1, H1, DO_ADD) +RVVCALL(OPIVV2, vadd_vv_h, OP_SSS_H, H2, H2, H2, DO_ADD) +RVVCALL(OPIVV2, vadd_vv_w, OP_SSS_W, H4, H4, H4, DO_ADD) +RVVCALL(OPIVV2, vadd_vv_d, OP_SSS_D, H8, H8, H8, DO_ADD) +RVVCALL(OPIVV2, vsub_vv_b, OP_SSS_B, H1, H1, H1, DO_SUB) +RVVCALL(OPIVV2, vsub_vv_h, OP_SSS_H, H2, H2, H2, DO_SUB) +RVVCALL(OPIVV2, vsub_vv_w, OP_SSS_W, H4, H4, H4, DO_SUB) +RVVCALL(OPIVV2, vsub_vv_d, OP_SSS_D, H8, H8, H8, DO_SUB) + +static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2, + CPURISCVState *env, uint32_t desc, + uint32_t esz, uint32_t dsz, + opivv2_fn *fn, clear_fn *clearfn) +{ + uint32_t vlmax = vext_maxsz(desc) / esz; + uint32_t mlen = vext_mlen(desc); + uint32_t vm = vext_vm(desc); + uint32_t vl = env->vl; + uint32_t i; + + for (i = 0; i < vl; i++) { + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + fn(vd, vs1, vs2, i); + } + clearfn(vd, vl, vl * dsz, vlmax * dsz); +} + +/* generate the helpers for OPIVV */ +#define GEN_VEXT_VV(NAME, ESZ, DSZ, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, void *vs1, \ + void *vs2, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + do_vext_vv(vd, 
v0, vs1, vs2, env, desc, ESZ, DSZ, \ + do_##NAME, CLEAR_FN); \ +} + +GEN_VEXT_VV(vadd_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vadd_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vadd_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vadd_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vsub_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vsub_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vsub_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vsub_vv_d, 8, 8, clearq) + +typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i); + +/* + * (T1)s1 gives the real operator type. + * (TX1)(T1)s1 expands the operator type of widen or narrow operations. + */ +#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ +static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \ +{ \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1); \ +} + +RVVCALL(OPIVX2, vadd_vx_b, OP_SSS_B, H1, H1, DO_ADD) +RVVCALL(OPIVX2, vadd_vx_h, OP_SSS_H, H2, H2, DO_ADD) +RVVCALL(OPIVX2, vadd_vx_w, OP_SSS_W, H4, H4, DO_ADD) +RVVCALL(OPIVX2, vadd_vx_d, OP_SSS_D, H8, H8, DO_ADD) +RVVCALL(OPIVX2, vsub_vx_b, OP_SSS_B, H1, H1, DO_SUB) +RVVCALL(OPIVX2, vsub_vx_h, OP_SSS_H, H2, H2, DO_SUB) +RVVCALL(OPIVX2, vsub_vx_w, OP_SSS_W, H4, H4, DO_SUB) +RVVCALL(OPIVX2, vsub_vx_d, OP_SSS_D, H8, H8, DO_SUB) +RVVCALL(OPIVX2, vrsub_vx_b, OP_SSS_B, H1, H1, DO_RSUB) +RVVCALL(OPIVX2, vrsub_vx_h, OP_SSS_H, H2, H2, DO_RSUB) +RVVCALL(OPIVX2, vrsub_vx_w, OP_SSS_W, H4, H4, DO_RSUB) +RVVCALL(OPIVX2, vrsub_vx_d, OP_SSS_D, H8, H8, DO_RSUB) + +static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2, + CPURISCVState *env, uint32_t desc, + uint32_t esz, uint32_t dsz, + opivx2_fn fn, clear_fn *clearfn) +{ + uint32_t vlmax = vext_maxsz(desc) / esz; + uint32_t mlen = vext_mlen(desc); + uint32_t vm = vext_vm(desc); + uint32_t vl = env->vl; + uint32_t i; + + for (i = 0; i < vl; i++) { + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + fn(vd, s1, vs2, i); + } + clearfn(vd, vl, vl * dsz, vlmax * dsz); +} + +/* generate the helpers for OPIVX */ +#define GEN_VEXT_VX(NAME, ESZ, DSZ, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ + void *vs2, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + do_vext_vx(vd, v0, s1, vs2, env, desc, ESZ, DSZ, \ + do_##NAME, CLEAR_FN); \ +} + +GEN_VEXT_VX(vadd_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vadd_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vadd_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vadd_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vsub_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vsub_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vsub_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vsub_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vrsub_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vrsub_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vrsub_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vrsub_vx_d, 8, 8, clearq) + +void HELPER(vec_rsubs8)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint8_t)) { + *(uint8_t *)(d + i) = (uint8_t)b - *(uint8_t *)(a + i); + } +} + +void HELPER(vec_rsubs16)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint16_t)) { + *(uint16_t *)(d + i) = (uint16_t)b - *(uint16_t *)(a + i); + } +} + +void HELPER(vec_rsubs32)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for (i = 0; i < oprsz; i += sizeof(uint32_t)) { + *(uint32_t *)(d + i) = (uint32_t)b - *(uint32_t *)(a + i); + } +} + +void HELPER(vec_rsubs64)(void *d, void *a, uint64_t b, uint32_t desc) +{ + intptr_t oprsz = simd_oprsz(desc); + intptr_t i; + + for 
(i = 0; i < oprsz; i += sizeof(uint64_t)) { + *(uint64_t *)(d + i) = b - *(uint64_t *)(a + i); + } +} + +/* Vector Widening Integer Add/Subtract */ +#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t +#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t +#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t +#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t +#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t +#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t +#define WOP_WUUU_B uint16_t, uint8_t, uint16_t, uint16_t, uint16_t +#define WOP_WUUU_H uint32_t, uint16_t, uint32_t, uint32_t, uint32_t +#define WOP_WUUU_W uint64_t, uint32_t, uint64_t, uint64_t, uint64_t +#define WOP_WSSS_B int16_t, int8_t, int16_t, int16_t, int16_t +#define WOP_WSSS_H int32_t, int16_t, int32_t, int32_t, int32_t +#define WOP_WSSS_W int64_t, int32_t, int64_t, int64_t, int64_t +RVVCALL(OPIVV2, vwaddu_vv_b, WOP_UUU_B, H2, H1, H1, DO_ADD) +RVVCALL(OPIVV2, vwaddu_vv_h, WOP_UUU_H, H4, H2, H2, DO_ADD) +RVVCALL(OPIVV2, vwaddu_vv_w, WOP_UUU_W, H8, H4, H4, DO_ADD) +RVVCALL(OPIVV2, vwsubu_vv_b, WOP_UUU_B, H2, H1, H1, DO_SUB) +RVVCALL(OPIVV2, vwsubu_vv_h, WOP_UUU_H, H4, H2, H2, DO_SUB) +RVVCALL(OPIVV2, vwsubu_vv_w, WOP_UUU_W, H8, H4, H4, DO_SUB) +RVVCALL(OPIVV2, vwadd_vv_b, WOP_SSS_B, H2, H1, H1, DO_ADD) +RVVCALL(OPIVV2, vwadd_vv_h, WOP_SSS_H, H4, H2, H2, DO_ADD) +RVVCALL(OPIVV2, vwadd_vv_w, WOP_SSS_W, H8, H4, H4, DO_ADD) +RVVCALL(OPIVV2, vwsub_vv_b, WOP_SSS_B, H2, H1, H1, DO_SUB) +RVVCALL(OPIVV2, vwsub_vv_h, WOP_SSS_H, H4, H2, H2, DO_SUB) +RVVCALL(OPIVV2, vwsub_vv_w, WOP_SSS_W, H8, H4, H4, DO_SUB) +RVVCALL(OPIVV2, vwaddu_wv_b, WOP_WUUU_B, H2, H1, H1, DO_ADD) +RVVCALL(OPIVV2, vwaddu_wv_h, WOP_WUUU_H, H4, H2, H2, DO_ADD) +RVVCALL(OPIVV2, vwaddu_wv_w, WOP_WUUU_W, H8, H4, H4, DO_ADD) +RVVCALL(OPIVV2, vwsubu_wv_b, WOP_WUUU_B, H2, H1, H1, DO_SUB) +RVVCALL(OPIVV2, vwsubu_wv_h, WOP_WUUU_H, H4, H2, H2, DO_SUB) +RVVCALL(OPIVV2, vwsubu_wv_w, WOP_WUUU_W, H8, H4, H4, DO_SUB) +RVVCALL(OPIVV2, vwadd_wv_b, WOP_WSSS_B, H2, H1, H1, DO_ADD) +RVVCALL(OPIVV2, vwadd_wv_h, WOP_WSSS_H, H4, H2, H2, DO_ADD) +RVVCALL(OPIVV2, vwadd_wv_w, WOP_WSSS_W, H8, H4, H4, DO_ADD) +RVVCALL(OPIVV2, vwsub_wv_b, WOP_WSSS_B, H2, H1, H1, DO_SUB) +RVVCALL(OPIVV2, vwsub_wv_h, WOP_WSSS_H, H4, H2, H2, DO_SUB) +RVVCALL(OPIVV2, vwsub_wv_w, WOP_WSSS_W, H8, H4, H4, DO_SUB) +GEN_VEXT_VV(vwaddu_vv_b, 1, 2, clearh) +GEN_VEXT_VV(vwaddu_vv_h, 2, 4, clearl) +GEN_VEXT_VV(vwaddu_vv_w, 4, 8, clearq) +GEN_VEXT_VV(vwsubu_vv_b, 1, 2, clearh) +GEN_VEXT_VV(vwsubu_vv_h, 2, 4, clearl) +GEN_VEXT_VV(vwsubu_vv_w, 4, 8, clearq) +GEN_VEXT_VV(vwadd_vv_b, 1, 2, clearh) +GEN_VEXT_VV(vwadd_vv_h, 2, 4, clearl) +GEN_VEXT_VV(vwadd_vv_w, 4, 8, clearq) +GEN_VEXT_VV(vwsub_vv_b, 1, 2, clearh) +GEN_VEXT_VV(vwsub_vv_h, 2, 4, clearl) +GEN_VEXT_VV(vwsub_vv_w, 4, 8, clearq) +GEN_VEXT_VV(vwaddu_wv_b, 1, 2, clearh) +GEN_VEXT_VV(vwaddu_wv_h, 2, 4, clearl) +GEN_VEXT_VV(vwaddu_wv_w, 4, 8, clearq) +GEN_VEXT_VV(vwsubu_wv_b, 1, 2, clearh) +GEN_VEXT_VV(vwsubu_wv_h, 2, 4, clearl) +GEN_VEXT_VV(vwsubu_wv_w, 4, 8, clearq) +GEN_VEXT_VV(vwadd_wv_b, 1, 2, clearh) +GEN_VEXT_VV(vwadd_wv_h, 2, 4, clearl) +GEN_VEXT_VV(vwadd_wv_w, 4, 8, clearq) +GEN_VEXT_VV(vwsub_wv_b, 1, 2, clearh) +GEN_VEXT_VV(vwsub_wv_h, 2, 4, clearl) +GEN_VEXT_VV(vwsub_wv_w, 4, 8, clearq) + +RVVCALL(OPIVX2, vwaddu_vx_b, WOP_UUU_B, H2, H1, DO_ADD) +RVVCALL(OPIVX2, vwaddu_vx_h, WOP_UUU_H, H4, H2, DO_ADD) +RVVCALL(OPIVX2, vwaddu_vx_w, WOP_UUU_W, H8, H4, DO_ADD) +RVVCALL(OPIVX2, vwsubu_vx_b, 
WOP_UUU_B, H2, H1, DO_SUB) +RVVCALL(OPIVX2, vwsubu_vx_h, WOP_UUU_H, H4, H2, DO_SUB) +RVVCALL(OPIVX2, vwsubu_vx_w, WOP_UUU_W, H8, H4, DO_SUB) +RVVCALL(OPIVX2, vwadd_vx_b, WOP_SSS_B, H2, H1, DO_ADD) +RVVCALL(OPIVX2, vwadd_vx_h, WOP_SSS_H, H4, H2, DO_ADD) +RVVCALL(OPIVX2, vwadd_vx_w, WOP_SSS_W, H8, H4, DO_ADD) +RVVCALL(OPIVX2, vwsub_vx_b, WOP_SSS_B, H2, H1, DO_SUB) +RVVCALL(OPIVX2, vwsub_vx_h, WOP_SSS_H, H4, H2, DO_SUB) +RVVCALL(OPIVX2, vwsub_vx_w, WOP_SSS_W, H8, H4, DO_SUB) +RVVCALL(OPIVX2, vwaddu_wx_b, WOP_WUUU_B, H2, H1, DO_ADD) +RVVCALL(OPIVX2, vwaddu_wx_h, WOP_WUUU_H, H4, H2, DO_ADD) +RVVCALL(OPIVX2, vwaddu_wx_w, WOP_WUUU_W, H8, H4, DO_ADD) +RVVCALL(OPIVX2, vwsubu_wx_b, WOP_WUUU_B, H2, H1, DO_SUB) +RVVCALL(OPIVX2, vwsubu_wx_h, WOP_WUUU_H, H4, H2, DO_SUB) +RVVCALL(OPIVX2, vwsubu_wx_w, WOP_WUUU_W, H8, H4, DO_SUB) +RVVCALL(OPIVX2, vwadd_wx_b, WOP_WSSS_B, H2, H1, DO_ADD) +RVVCALL(OPIVX2, vwadd_wx_h, WOP_WSSS_H, H4, H2, DO_ADD) +RVVCALL(OPIVX2, vwadd_wx_w, WOP_WSSS_W, H8, H4, DO_ADD) +RVVCALL(OPIVX2, vwsub_wx_b, WOP_WSSS_B, H2, H1, DO_SUB) +RVVCALL(OPIVX2, vwsub_wx_h, WOP_WSSS_H, H4, H2, DO_SUB) +RVVCALL(OPIVX2, vwsub_wx_w, WOP_WSSS_W, H8, H4, DO_SUB) +GEN_VEXT_VX(vwaddu_vx_b, 1, 2, clearh) +GEN_VEXT_VX(vwaddu_vx_h, 2, 4, clearl) +GEN_VEXT_VX(vwaddu_vx_w, 4, 8, clearq) +GEN_VEXT_VX(vwsubu_vx_b, 1, 2, clearh) +GEN_VEXT_VX(vwsubu_vx_h, 2, 4, clearl) +GEN_VEXT_VX(vwsubu_vx_w, 4, 8, clearq) +GEN_VEXT_VX(vwadd_vx_b, 1, 2, clearh) +GEN_VEXT_VX(vwadd_vx_h, 2, 4, clearl) +GEN_VEXT_VX(vwadd_vx_w, 4, 8, clearq) +GEN_VEXT_VX(vwsub_vx_b, 1, 2, clearh) +GEN_VEXT_VX(vwsub_vx_h, 2, 4, clearl) +GEN_VEXT_VX(vwsub_vx_w, 4, 8, clearq) +GEN_VEXT_VX(vwaddu_wx_b, 1, 2, clearh) +GEN_VEXT_VX(vwaddu_wx_h, 2, 4, clearl) +GEN_VEXT_VX(vwaddu_wx_w, 4, 8, clearq) +GEN_VEXT_VX(vwsubu_wx_b, 1, 2, clearh) +GEN_VEXT_VX(vwsubu_wx_h, 2, 4, clearl) +GEN_VEXT_VX(vwsubu_wx_w, 4, 8, clearq) +GEN_VEXT_VX(vwadd_wx_b, 1, 2, clearh) +GEN_VEXT_VX(vwadd_wx_h, 2, 4, clearl) +GEN_VEXT_VX(vwadd_wx_w, 4, 8, clearq) +GEN_VEXT_VX(vwsub_wx_b, 1, 2, clearh) +GEN_VEXT_VX(vwsub_wx_h, 2, 4, clearl) +GEN_VEXT_VX(vwsub_wx_w, 4, 8, clearq) + +/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */ +#define DO_VADC(N, M, C) (N + M + C) +#define DO_VSBC(N, M, C) (N - M - C) + +#define GEN_VEXT_VADC_VVM(NAME, ETYPE, H, DO_OP, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ + ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ + uint8_t carry = vext_elem_mask(v0, mlen, i); \ + \ + *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry); \ + } \ + CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ +} + +GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t, H1, DO_VADC, clearb) +GEN_VEXT_VADC_VVM(vadc_vvm_h, uint16_t, H2, DO_VADC, clearh) +GEN_VEXT_VADC_VVM(vadc_vvm_w, uint32_t, H4, DO_VADC, clearl) +GEN_VEXT_VADC_VVM(vadc_vvm_d, uint64_t, H8, DO_VADC, clearq) + +GEN_VEXT_VADC_VVM(vsbc_vvm_b, uint8_t, H1, DO_VSBC, clearb) +GEN_VEXT_VADC_VVM(vsbc_vvm_h, uint16_t, H2, DO_VSBC, clearh) +GEN_VEXT_VADC_VVM(vsbc_vvm_w, uint32_t, H4, DO_VSBC, clearl) +GEN_VEXT_VADC_VVM(vsbc_vvm_d, uint64_t, H8, DO_VSBC, clearq) + +#define GEN_VEXT_VADC_VXM(NAME, ETYPE, H, DO_OP, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t 
mlen = vext_mlen(desc); \ + uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ + uint8_t carry = vext_elem_mask(v0, mlen, i); \ + \ + *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\ + } \ + CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ +} + +GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t, H1, DO_VADC, clearb) +GEN_VEXT_VADC_VXM(vadc_vxm_h, uint16_t, H2, DO_VADC, clearh) +GEN_VEXT_VADC_VXM(vadc_vxm_w, uint32_t, H4, DO_VADC, clearl) +GEN_VEXT_VADC_VXM(vadc_vxm_d, uint64_t, H8, DO_VADC, clearq) + +GEN_VEXT_VADC_VXM(vsbc_vxm_b, uint8_t, H1, DO_VSBC, clearb) +GEN_VEXT_VADC_VXM(vsbc_vxm_h, uint16_t, H2, DO_VSBC, clearh) +GEN_VEXT_VADC_VXM(vsbc_vxm_w, uint32_t, H4, DO_VSBC, clearl) +GEN_VEXT_VADC_VXM(vsbc_vxm_d, uint64_t, H8, DO_VSBC, clearq) + +#define DO_MADC(N, M, C) (C ? (__typeof(N))(N + M + 1) <= N : \ + (__typeof(N))(N + M) < N) +#define DO_MSBC(N, M, C) (C ? N <= M : N < M) + +#define GEN_VEXT_VMADC_VVM(NAME, ETYPE, H, DO_OP) \ +void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vl = env->vl; \ + uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ + ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ + uint8_t carry = vext_elem_mask(v0, mlen, i); \ + \ + vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1, carry));\ + } \ + for (; i < vlmax; i++) { \ + vext_set_elem_mask(vd, mlen, i, 0); \ + } \ +} + +GEN_VEXT_VMADC_VVM(vmadc_vvm_b, uint8_t, H1, DO_MADC) +GEN_VEXT_VMADC_VVM(vmadc_vvm_h, uint16_t, H2, DO_MADC) +GEN_VEXT_VMADC_VVM(vmadc_vvm_w, uint32_t, H4, DO_MADC) +GEN_VEXT_VMADC_VVM(vmadc_vvm_d, uint64_t, H8, DO_MADC) + +GEN_VEXT_VMADC_VVM(vmsbc_vvm_b, uint8_t, H1, DO_MSBC) +GEN_VEXT_VMADC_VVM(vmsbc_vvm_h, uint16_t, H2, DO_MSBC) +GEN_VEXT_VMADC_VVM(vmsbc_vvm_w, uint32_t, H4, DO_MSBC) +GEN_VEXT_VMADC_VVM(vmsbc_vvm_d, uint64_t, H8, DO_MSBC) + +#define GEN_VEXT_VMADC_VXM(NAME, ETYPE, H, DO_OP) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ + void *vs2, CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vl = env->vl; \ + uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ + uint8_t carry = vext_elem_mask(v0, mlen, i); \ + \ + vext_set_elem_mask(vd, mlen, i, \ + DO_OP(s2, (ETYPE)(target_long)s1, carry)); \ + } \ + for (; i < vlmax; i++) { \ + vext_set_elem_mask(vd, mlen, i, 0); \ + } \ +} + +GEN_VEXT_VMADC_VXM(vmadc_vxm_b, uint8_t, H1, DO_MADC) +GEN_VEXT_VMADC_VXM(vmadc_vxm_h, uint16_t, H2, DO_MADC) +GEN_VEXT_VMADC_VXM(vmadc_vxm_w, uint32_t, H4, DO_MADC) +GEN_VEXT_VMADC_VXM(vmadc_vxm_d, uint64_t, H8, DO_MADC) + +GEN_VEXT_VMADC_VXM(vmsbc_vxm_b, uint8_t, H1, DO_MSBC) +GEN_VEXT_VMADC_VXM(vmsbc_vxm_h, uint16_t, H2, DO_MSBC) +GEN_VEXT_VMADC_VXM(vmsbc_vxm_w, uint32_t, H4, DO_MSBC) +GEN_VEXT_VMADC_VXM(vmsbc_vxm_d, uint64_t, H8, DO_MSBC) + +/* Vector Bitwise Logical Instructions */ +RVVCALL(OPIVV2, vand_vv_b, OP_SSS_B, H1, H1, H1, DO_AND) +RVVCALL(OPIVV2, vand_vv_h, OP_SSS_H, H2, H2, H2, DO_AND) +RVVCALL(OPIVV2, vand_vv_w, OP_SSS_W, H4, H4, H4, DO_AND) +RVVCALL(OPIVV2, vand_vv_d, OP_SSS_D, H8, H8, H8, DO_AND) +RVVCALL(OPIVV2, vor_vv_b, OP_SSS_B, H1, H1, H1, DO_OR) +RVVCALL(OPIVV2, vor_vv_h, OP_SSS_H, H2, H2, H2, DO_OR) +RVVCALL(OPIVV2, 
vor_vv_w, OP_SSS_W, H4, H4, H4, DO_OR) +RVVCALL(OPIVV2, vor_vv_d, OP_SSS_D, H8, H8, H8, DO_OR) +RVVCALL(OPIVV2, vxor_vv_b, OP_SSS_B, H1, H1, H1, DO_XOR) +RVVCALL(OPIVV2, vxor_vv_h, OP_SSS_H, H2, H2, H2, DO_XOR) +RVVCALL(OPIVV2, vxor_vv_w, OP_SSS_W, H4, H4, H4, DO_XOR) +RVVCALL(OPIVV2, vxor_vv_d, OP_SSS_D, H8, H8, H8, DO_XOR) +GEN_VEXT_VV(vand_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vand_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vand_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vand_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vor_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vor_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vor_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vor_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vxor_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vxor_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vxor_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vxor_vv_d, 8, 8, clearq) + +RVVCALL(OPIVX2, vand_vx_b, OP_SSS_B, H1, H1, DO_AND) +RVVCALL(OPIVX2, vand_vx_h, OP_SSS_H, H2, H2, DO_AND) +RVVCALL(OPIVX2, vand_vx_w, OP_SSS_W, H4, H4, DO_AND) +RVVCALL(OPIVX2, vand_vx_d, OP_SSS_D, H8, H8, DO_AND) +RVVCALL(OPIVX2, vor_vx_b, OP_SSS_B, H1, H1, DO_OR) +RVVCALL(OPIVX2, vor_vx_h, OP_SSS_H, H2, H2, DO_OR) +RVVCALL(OPIVX2, vor_vx_w, OP_SSS_W, H4, H4, DO_OR) +RVVCALL(OPIVX2, vor_vx_d, OP_SSS_D, H8, H8, DO_OR) +RVVCALL(OPIVX2, vxor_vx_b, OP_SSS_B, H1, H1, DO_XOR) +RVVCALL(OPIVX2, vxor_vx_h, OP_SSS_H, H2, H2, DO_XOR) +RVVCALL(OPIVX2, vxor_vx_w, OP_SSS_W, H4, H4, DO_XOR) +RVVCALL(OPIVX2, vxor_vx_d, OP_SSS_D, H8, H8, DO_XOR) +GEN_VEXT_VX(vand_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vand_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vand_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vand_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vor_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vor_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vor_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vor_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vxor_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vxor_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vxor_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vxor_vx_d, 8, 8, clearq) + +/* Vector Single-Width Bit Shift Instructions */ +#define DO_SLL(N, M) (N << (M)) +#define DO_SRL(N, M) (N >> (M)) + +/* generate the helpers for shift instructions with two vector operators */ +#define GEN_VEXT_SHIFT_VV(NAME, TS1, TS2, HS1, HS2, OP, MASK, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, void *vs1, \ + void *vs2, CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t esz = sizeof(TS1); \ + uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + TS1 s1 = *((TS1 *)vs1 + HS1(i)); \ + TS2 s2 = *((TS2 *)vs2 + HS2(i)); \ + *((TS1 *)vd + HS1(i)) = OP(s2, s1 & MASK); \ + } \ + CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ +} + +GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t, uint8_t, H1, H1, DO_SLL, 0x7, clearb) +GEN_VEXT_SHIFT_VV(vsll_vv_h, uint16_t, uint16_t, H2, H2, DO_SLL, 0xf, clearh) +GEN_VEXT_SHIFT_VV(vsll_vv_w, uint32_t, uint32_t, H4, H4, DO_SLL, 0x1f, clearl) +GEN_VEXT_SHIFT_VV(vsll_vv_d, uint64_t, uint64_t, H8, H8, DO_SLL, 0x3f, clearq) + +GEN_VEXT_SHIFT_VV(vsrl_vv_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb) +GEN_VEXT_SHIFT_VV(vsrl_vv_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh) +GEN_VEXT_SHIFT_VV(vsrl_vv_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl) +GEN_VEXT_SHIFT_VV(vsrl_vv_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq) + +GEN_VEXT_SHIFT_VV(vsra_vv_b, uint8_t, int8_t, H1, H1, DO_SRL, 0x7, clearb) +GEN_VEXT_SHIFT_VV(vsra_vv_h, uint16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh) +GEN_VEXT_SHIFT_VV(vsra_vv_w, uint32_t, 
int32_t, H4, H4, DO_SRL, 0x1f, clearl) +GEN_VEXT_SHIFT_VV(vsra_vv_d, uint64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq) + +/* generate the helpers for shift instructions with one vector and one scalar */ +#define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ + void *vs2, CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t esz = sizeof(TD); \ + uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + TS2 s2 = *((TS2 *)vs2 + HS2(i)); \ + *((TD *)vd + HD(i)) = OP(s2, s1 & MASK); \ + } \ + CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ +} + +GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7, clearb) +GEN_VEXT_SHIFT_VX(vsll_vx_h, uint16_t, int16_t, H2, H2, DO_SLL, 0xf, clearh) +GEN_VEXT_SHIFT_VX(vsll_vx_w, uint32_t, int32_t, H4, H4, DO_SLL, 0x1f, clearl) +GEN_VEXT_SHIFT_VX(vsll_vx_d, uint64_t, int64_t, H8, H8, DO_SLL, 0x3f, clearq) + +GEN_VEXT_SHIFT_VX(vsrl_vx_b, uint8_t, uint8_t, H1, H1, DO_SRL, 0x7, clearb) +GEN_VEXT_SHIFT_VX(vsrl_vx_h, uint16_t, uint16_t, H2, H2, DO_SRL, 0xf, clearh) +GEN_VEXT_SHIFT_VX(vsrl_vx_w, uint32_t, uint32_t, H4, H4, DO_SRL, 0x1f, clearl) +GEN_VEXT_SHIFT_VX(vsrl_vx_d, uint64_t, uint64_t, H8, H8, DO_SRL, 0x3f, clearq) + +GEN_VEXT_SHIFT_VX(vsra_vx_b, int8_t, int8_t, H1, H1, DO_SRL, 0x7, clearb) +GEN_VEXT_SHIFT_VX(vsra_vx_h, int16_t, int16_t, H2, H2, DO_SRL, 0xf, clearh) +GEN_VEXT_SHIFT_VX(vsra_vx_w, int32_t, int32_t, H4, H4, DO_SRL, 0x1f, clearl) +GEN_VEXT_SHIFT_VX(vsra_vx_d, int64_t, int64_t, H8, H8, DO_SRL, 0x3f, clearq) + +/* Vector Narrowing Integer Right Shift Instructions */ +GEN_VEXT_SHIFT_VV(vnsrl_vv_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb) +GEN_VEXT_SHIFT_VV(vnsrl_vv_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh) +GEN_VEXT_SHIFT_VV(vnsrl_vv_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl) +GEN_VEXT_SHIFT_VV(vnsra_vv_b, uint8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb) +GEN_VEXT_SHIFT_VV(vnsra_vv_h, uint16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh) +GEN_VEXT_SHIFT_VV(vnsra_vv_w, uint32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl) +GEN_VEXT_SHIFT_VX(vnsrl_vx_b, uint8_t, uint16_t, H1, H2, DO_SRL, 0xf, clearb) +GEN_VEXT_SHIFT_VX(vnsrl_vx_h, uint16_t, uint32_t, H2, H4, DO_SRL, 0x1f, clearh) +GEN_VEXT_SHIFT_VX(vnsrl_vx_w, uint32_t, uint64_t, H4, H8, DO_SRL, 0x3f, clearl) +GEN_VEXT_SHIFT_VX(vnsra_vx_b, int8_t, int16_t, H1, H2, DO_SRL, 0xf, clearb) +GEN_VEXT_SHIFT_VX(vnsra_vx_h, int16_t, int32_t, H2, H4, DO_SRL, 0x1f, clearh) +GEN_VEXT_SHIFT_VX(vnsra_vx_w, int32_t, int64_t, H4, H8, DO_SRL, 0x3f, clearl) + +/* Vector Integer Comparison Instructions */ +#define DO_MSEQ(N, M) (N == M) +#define DO_MSNE(N, M) (N != M) +#define DO_MSLT(N, M) (N < M) +#define DO_MSLE(N, M) (N <= M) +#define DO_MSGT(N, M) (N > M) + +#define GEN_VEXT_CMP_VV(NAME, ETYPE, H, DO_OP) \ +void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ + ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + vext_set_elem_mask(vd, mlen, i, DO_OP(s2, s1)); \ + } \ + for (; i < 
vlmax; i++) { \ + vext_set_elem_mask(vd, mlen, i, 0); \ + } \ +} + +GEN_VEXT_CMP_VV(vmseq_vv_b, uint8_t, H1, DO_MSEQ) +GEN_VEXT_CMP_VV(vmseq_vv_h, uint16_t, H2, DO_MSEQ) +GEN_VEXT_CMP_VV(vmseq_vv_w, uint32_t, H4, DO_MSEQ) +GEN_VEXT_CMP_VV(vmseq_vv_d, uint64_t, H8, DO_MSEQ) + +GEN_VEXT_CMP_VV(vmsne_vv_b, uint8_t, H1, DO_MSNE) +GEN_VEXT_CMP_VV(vmsne_vv_h, uint16_t, H2, DO_MSNE) +GEN_VEXT_CMP_VV(vmsne_vv_w, uint32_t, H4, DO_MSNE) +GEN_VEXT_CMP_VV(vmsne_vv_d, uint64_t, H8, DO_MSNE) + +GEN_VEXT_CMP_VV(vmsltu_vv_b, uint8_t, H1, DO_MSLT) +GEN_VEXT_CMP_VV(vmsltu_vv_h, uint16_t, H2, DO_MSLT) +GEN_VEXT_CMP_VV(vmsltu_vv_w, uint32_t, H4, DO_MSLT) +GEN_VEXT_CMP_VV(vmsltu_vv_d, uint64_t, H8, DO_MSLT) + +GEN_VEXT_CMP_VV(vmslt_vv_b, int8_t, H1, DO_MSLT) +GEN_VEXT_CMP_VV(vmslt_vv_h, int16_t, H2, DO_MSLT) +GEN_VEXT_CMP_VV(vmslt_vv_w, int32_t, H4, DO_MSLT) +GEN_VEXT_CMP_VV(vmslt_vv_d, int64_t, H8, DO_MSLT) + +GEN_VEXT_CMP_VV(vmsleu_vv_b, uint8_t, H1, DO_MSLE) +GEN_VEXT_CMP_VV(vmsleu_vv_h, uint16_t, H2, DO_MSLE) +GEN_VEXT_CMP_VV(vmsleu_vv_w, uint32_t, H4, DO_MSLE) +GEN_VEXT_CMP_VV(vmsleu_vv_d, uint64_t, H8, DO_MSLE) + +GEN_VEXT_CMP_VV(vmsle_vv_b, int8_t, H1, DO_MSLE) +GEN_VEXT_CMP_VV(vmsle_vv_h, int16_t, H2, DO_MSLE) +GEN_VEXT_CMP_VV(vmsle_vv_w, int32_t, H4, DO_MSLE) +GEN_VEXT_CMP_VV(vmsle_vv_d, int64_t, H8, DO_MSLE) + +#define GEN_VEXT_CMP_VX(NAME, ETYPE, H, DO_OP) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + vext_set_elem_mask(vd, mlen, i, \ + DO_OP(s2, (ETYPE)(target_long)s1)); \ + } \ + for (; i < vlmax; i++) { \ + vext_set_elem_mask(vd, mlen, i, 0); \ + } \ +} + +GEN_VEXT_CMP_VX(vmseq_vx_b, uint8_t, H1, DO_MSEQ) +GEN_VEXT_CMP_VX(vmseq_vx_h, uint16_t, H2, DO_MSEQ) +GEN_VEXT_CMP_VX(vmseq_vx_w, uint32_t, H4, DO_MSEQ) +GEN_VEXT_CMP_VX(vmseq_vx_d, uint64_t, H8, DO_MSEQ) + +GEN_VEXT_CMP_VX(vmsne_vx_b, uint8_t, H1, DO_MSNE) +GEN_VEXT_CMP_VX(vmsne_vx_h, uint16_t, H2, DO_MSNE) +GEN_VEXT_CMP_VX(vmsne_vx_w, uint32_t, H4, DO_MSNE) +GEN_VEXT_CMP_VX(vmsne_vx_d, uint64_t, H8, DO_MSNE) + +GEN_VEXT_CMP_VX(vmsltu_vx_b, uint8_t, H1, DO_MSLT) +GEN_VEXT_CMP_VX(vmsltu_vx_h, uint16_t, H2, DO_MSLT) +GEN_VEXT_CMP_VX(vmsltu_vx_w, uint32_t, H4, DO_MSLT) +GEN_VEXT_CMP_VX(vmsltu_vx_d, uint64_t, H8, DO_MSLT) + +GEN_VEXT_CMP_VX(vmslt_vx_b, int8_t, H1, DO_MSLT) +GEN_VEXT_CMP_VX(vmslt_vx_h, int16_t, H2, DO_MSLT) +GEN_VEXT_CMP_VX(vmslt_vx_w, int32_t, H4, DO_MSLT) +GEN_VEXT_CMP_VX(vmslt_vx_d, int64_t, H8, DO_MSLT) + +GEN_VEXT_CMP_VX(vmsleu_vx_b, uint8_t, H1, DO_MSLE) +GEN_VEXT_CMP_VX(vmsleu_vx_h, uint16_t, H2, DO_MSLE) +GEN_VEXT_CMP_VX(vmsleu_vx_w, uint32_t, H4, DO_MSLE) +GEN_VEXT_CMP_VX(vmsleu_vx_d, uint64_t, H8, DO_MSLE) + +GEN_VEXT_CMP_VX(vmsle_vx_b, int8_t, H1, DO_MSLE) +GEN_VEXT_CMP_VX(vmsle_vx_h, int16_t, H2, DO_MSLE) +GEN_VEXT_CMP_VX(vmsle_vx_w, int32_t, H4, DO_MSLE) +GEN_VEXT_CMP_VX(vmsle_vx_d, int64_t, H8, DO_MSLE) + +GEN_VEXT_CMP_VX(vmsgtu_vx_b, uint8_t, H1, DO_MSGT) +GEN_VEXT_CMP_VX(vmsgtu_vx_h, uint16_t, H2, DO_MSGT) +GEN_VEXT_CMP_VX(vmsgtu_vx_w, uint32_t, H4, DO_MSGT) +GEN_VEXT_CMP_VX(vmsgtu_vx_d, uint64_t, H8, DO_MSGT) + +GEN_VEXT_CMP_VX(vmsgt_vx_b, int8_t, H1, DO_MSGT) +GEN_VEXT_CMP_VX(vmsgt_vx_h, int16_t, H2, DO_MSGT) +GEN_VEXT_CMP_VX(vmsgt_vx_w, 
int32_t, H4, DO_MSGT) +GEN_VEXT_CMP_VX(vmsgt_vx_d, int64_t, H8, DO_MSGT) + +/* Vector Integer Min/Max Instructions */ +RVVCALL(OPIVV2, vminu_vv_b, OP_UUU_B, H1, H1, H1, DO_MIN) +RVVCALL(OPIVV2, vminu_vv_h, OP_UUU_H, H2, H2, H2, DO_MIN) +RVVCALL(OPIVV2, vminu_vv_w, OP_UUU_W, H4, H4, H4, DO_MIN) +RVVCALL(OPIVV2, vminu_vv_d, OP_UUU_D, H8, H8, H8, DO_MIN) +RVVCALL(OPIVV2, vmin_vv_b, OP_SSS_B, H1, H1, H1, DO_MIN) +RVVCALL(OPIVV2, vmin_vv_h, OP_SSS_H, H2, H2, H2, DO_MIN) +RVVCALL(OPIVV2, vmin_vv_w, OP_SSS_W, H4, H4, H4, DO_MIN) +RVVCALL(OPIVV2, vmin_vv_d, OP_SSS_D, H8, H8, H8, DO_MIN) +RVVCALL(OPIVV2, vmaxu_vv_b, OP_UUU_B, H1, H1, H1, DO_MAX) +RVVCALL(OPIVV2, vmaxu_vv_h, OP_UUU_H, H2, H2, H2, DO_MAX) +RVVCALL(OPIVV2, vmaxu_vv_w, OP_UUU_W, H4, H4, H4, DO_MAX) +RVVCALL(OPIVV2, vmaxu_vv_d, OP_UUU_D, H8, H8, H8, DO_MAX) +RVVCALL(OPIVV2, vmax_vv_b, OP_SSS_B, H1, H1, H1, DO_MAX) +RVVCALL(OPIVV2, vmax_vv_h, OP_SSS_H, H2, H2, H2, DO_MAX) +RVVCALL(OPIVV2, vmax_vv_w, OP_SSS_W, H4, H4, H4, DO_MAX) +RVVCALL(OPIVV2, vmax_vv_d, OP_SSS_D, H8, H8, H8, DO_MAX) +GEN_VEXT_VV(vminu_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vminu_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vminu_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vminu_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vmin_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vmin_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vmin_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vmin_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vmaxu_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vmaxu_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vmaxu_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vmaxu_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vmax_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vmax_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vmax_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vmax_vv_d, 8, 8, clearq) + +RVVCALL(OPIVX2, vminu_vx_b, OP_UUU_B, H1, H1, DO_MIN) +RVVCALL(OPIVX2, vminu_vx_h, OP_UUU_H, H2, H2, DO_MIN) +RVVCALL(OPIVX2, vminu_vx_w, OP_UUU_W, H4, H4, DO_MIN) +RVVCALL(OPIVX2, vminu_vx_d, OP_UUU_D, H8, H8, DO_MIN) +RVVCALL(OPIVX2, vmin_vx_b, OP_SSS_B, H1, H1, DO_MIN) +RVVCALL(OPIVX2, vmin_vx_h, OP_SSS_H, H2, H2, DO_MIN) +RVVCALL(OPIVX2, vmin_vx_w, OP_SSS_W, H4, H4, DO_MIN) +RVVCALL(OPIVX2, vmin_vx_d, OP_SSS_D, H8, H8, DO_MIN) +RVVCALL(OPIVX2, vmaxu_vx_b, OP_UUU_B, H1, H1, DO_MAX) +RVVCALL(OPIVX2, vmaxu_vx_h, OP_UUU_H, H2, H2, DO_MAX) +RVVCALL(OPIVX2, vmaxu_vx_w, OP_UUU_W, H4, H4, DO_MAX) +RVVCALL(OPIVX2, vmaxu_vx_d, OP_UUU_D, H8, H8, DO_MAX) +RVVCALL(OPIVX2, vmax_vx_b, OP_SSS_B, H1, H1, DO_MAX) +RVVCALL(OPIVX2, vmax_vx_h, OP_SSS_H, H2, H2, DO_MAX) +RVVCALL(OPIVX2, vmax_vx_w, OP_SSS_W, H4, H4, DO_MAX) +RVVCALL(OPIVX2, vmax_vx_d, OP_SSS_D, H8, H8, DO_MAX) +GEN_VEXT_VX(vminu_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vminu_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vminu_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vminu_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vmin_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vmin_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vmin_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vmin_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vmaxu_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vmaxu_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vmaxu_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vmaxu_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vmax_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vmax_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vmax_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vmax_vx_d, 8, 8, clearq) + +/* Vector Single-Width Integer Multiply Instructions */ +#define DO_MUL(N, M) (N * M) +RVVCALL(OPIVV2, vmul_vv_b, OP_SSS_B, H1, H1, H1, DO_MUL) +RVVCALL(OPIVV2, vmul_vv_h, OP_SSS_H, H2, H2, H2, DO_MUL) +RVVCALL(OPIVV2, vmul_vv_w, OP_SSS_W, H4, H4, H4, DO_MUL) +RVVCALL(OPIVV2, vmul_vv_d, OP_SSS_D, H8, H8, H8, DO_MUL) +GEN_VEXT_VV(vmul_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vmul_vv_h, 2, 2, 
clearh) +GEN_VEXT_VV(vmul_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vmul_vv_d, 8, 8, clearq) + +static int8_t do_mulh_b(int8_t s2, int8_t s1) +{ + return (int16_t)s2 * (int16_t)s1 >> 8; +} + +static int16_t do_mulh_h(int16_t s2, int16_t s1) +{ + return (int32_t)s2 * (int32_t)s1 >> 16; +} + +static int32_t do_mulh_w(int32_t s2, int32_t s1) +{ + return (int64_t)s2 * (int64_t)s1 >> 32; +} + +static int64_t do_mulh_d(int64_t s2, int64_t s1) +{ + uint64_t hi_64, lo_64; + + muls64(&lo_64, &hi_64, s1, s2); + return hi_64; +} + +static uint8_t do_mulhu_b(uint8_t s2, uint8_t s1) +{ + return (uint16_t)s2 * (uint16_t)s1 >> 8; +} + +static uint16_t do_mulhu_h(uint16_t s2, uint16_t s1) +{ + return (uint32_t)s2 * (uint32_t)s1 >> 16; +} + +static uint32_t do_mulhu_w(uint32_t s2, uint32_t s1) +{ + return (uint64_t)s2 * (uint64_t)s1 >> 32; +} + +static uint64_t do_mulhu_d(uint64_t s2, uint64_t s1) +{ + uint64_t hi_64, lo_64; + + mulu64(&lo_64, &hi_64, s2, s1); + return hi_64; +} + +static int8_t do_mulhsu_b(int8_t s2, uint8_t s1) +{ + return (int16_t)s2 * (uint16_t)s1 >> 8; +} + +static int16_t do_mulhsu_h(int16_t s2, uint16_t s1) +{ + return (int32_t)s2 * (uint32_t)s1 >> 16; +} + +static int32_t do_mulhsu_w(int32_t s2, uint32_t s1) +{ + return (int64_t)s2 * (uint64_t)s1 >> 32; +} + +/* + * Let A = signed operand, + * B = unsigned operand + * P = mulu64(A, B), unsigned product + * + * LET X = 2 ** 64 - A, 2's complement of A + * SP = signed product + * THEN + * IF A < 0 + * SP = -X * B + * = -(2 ** 64 - A) * B + * = A * B - 2 ** 64 * B + * = P - 2 ** 64 * B + * ELSE + * SP = P + * THEN + * HI_P -= (A < 0 ? B : 0) + */ + +static int64_t do_mulhsu_d(int64_t s2, uint64_t s1) +{ + uint64_t hi_64, lo_64; + + mulu64(&lo_64, &hi_64, s2, s1); + + hi_64 -= s2 < 0 ? s1 : 0; + return hi_64; +} + +RVVCALL(OPIVV2, vmulh_vv_b, OP_SSS_B, H1, H1, H1, do_mulh_b) +RVVCALL(OPIVV2, vmulh_vv_h, OP_SSS_H, H2, H2, H2, do_mulh_h) +RVVCALL(OPIVV2, vmulh_vv_w, OP_SSS_W, H4, H4, H4, do_mulh_w) +RVVCALL(OPIVV2, vmulh_vv_d, OP_SSS_D, H8, H8, H8, do_mulh_d) +RVVCALL(OPIVV2, vmulhu_vv_b, OP_UUU_B, H1, H1, H1, do_mulhu_b) +RVVCALL(OPIVV2, vmulhu_vv_h, OP_UUU_H, H2, H2, H2, do_mulhu_h) +RVVCALL(OPIVV2, vmulhu_vv_w, OP_UUU_W, H4, H4, H4, do_mulhu_w) +RVVCALL(OPIVV2, vmulhu_vv_d, OP_UUU_D, H8, H8, H8, do_mulhu_d) +RVVCALL(OPIVV2, vmulhsu_vv_b, OP_SUS_B, H1, H1, H1, do_mulhsu_b) +RVVCALL(OPIVV2, vmulhsu_vv_h, OP_SUS_H, H2, H2, H2, do_mulhsu_h) +RVVCALL(OPIVV2, vmulhsu_vv_w, OP_SUS_W, H4, H4, H4, do_mulhsu_w) +RVVCALL(OPIVV2, vmulhsu_vv_d, OP_SUS_D, H8, H8, H8, do_mulhsu_d) +GEN_VEXT_VV(vmulh_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vmulh_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vmulh_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vmulh_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vmulhu_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vmulhu_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vmulhu_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vmulhu_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vmulhsu_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vmulhsu_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vmulhsu_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vmulhsu_vv_d, 8, 8, clearq) + +RVVCALL(OPIVX2, vmul_vx_b, OP_SSS_B, H1, H1, DO_MUL) +RVVCALL(OPIVX2, vmul_vx_h, OP_SSS_H, H2, H2, DO_MUL) +RVVCALL(OPIVX2, vmul_vx_w, OP_SSS_W, H4, H4, DO_MUL) +RVVCALL(OPIVX2, vmul_vx_d, OP_SSS_D, H8, H8, DO_MUL) +RVVCALL(OPIVX2, vmulh_vx_b, OP_SSS_B, H1, H1, do_mulh_b) +RVVCALL(OPIVX2, vmulh_vx_h, OP_SSS_H, H2, H2, do_mulh_h) +RVVCALL(OPIVX2, vmulh_vx_w, OP_SSS_W, H4, H4, do_mulh_w) +RVVCALL(OPIVX2, vmulh_vx_d, OP_SSS_D, H8, H8, do_mulh_d) +RVVCALL(OPIVX2, vmulhu_vx_b, OP_UUU_B, H1, H1, do_mulhu_b) 
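The do_mulhsu_d derivation above (take the unsigned high product, then subtract B once when A is negative) can be checked in isolation. A minimal standalone sketch, not part of the patch, using a GCC/Clang-style __int128 in place of QEMU's mulu64():

    #include <assert.h>
    #include <stdint.h>

    /* Reference: exact 128-bit signed x unsigned product, high 64 bits
     * (relies on GCC/Clang arithmetic right shift of signed __int128). */
    static int64_t mulhsu64_ref(int64_t a, uint64_t b)
    {
        __int128 p = (__int128)a * (__int128)b;
        return (int64_t)(p >> 64);
    }

    /* Same trick as do_mulhsu_d: unsigned high product of the raw bit
     * patterns, corrected by subtracting b when a is negative. */
    static int64_t mulhsu64_trick(int64_t a, uint64_t b)
    {
        uint64_t hi = (uint64_t)(((unsigned __int128)(uint64_t)a * b) >> 64);

        hi -= a < 0 ? b : 0;
        return (int64_t)hi;      /* two's-complement reinterpretation */
    }

    int main(void)
    {
        assert(mulhsu64_trick(-1, UINT64_MAX) == mulhsu64_ref(-1, UINT64_MAX));
        assert(mulhsu64_trick(INT64_MIN, 3)   == mulhsu64_ref(INT64_MIN, 3));
        assert(mulhsu64_trick(12345, 67890)   == mulhsu64_ref(12345, 67890));
        return 0;
    }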
+RVVCALL(OPIVX2, vmulhu_vx_h, OP_UUU_H, H2, H2, do_mulhu_h) +RVVCALL(OPIVX2, vmulhu_vx_w, OP_UUU_W, H4, H4, do_mulhu_w) +RVVCALL(OPIVX2, vmulhu_vx_d, OP_UUU_D, H8, H8, do_mulhu_d) +RVVCALL(OPIVX2, vmulhsu_vx_b, OP_SUS_B, H1, H1, do_mulhsu_b) +RVVCALL(OPIVX2, vmulhsu_vx_h, OP_SUS_H, H2, H2, do_mulhsu_h) +RVVCALL(OPIVX2, vmulhsu_vx_w, OP_SUS_W, H4, H4, do_mulhsu_w) +RVVCALL(OPIVX2, vmulhsu_vx_d, OP_SUS_D, H8, H8, do_mulhsu_d) +GEN_VEXT_VX(vmul_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vmul_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vmul_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vmul_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vmulh_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vmulh_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vmulh_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vmulh_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vmulhu_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vmulhu_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vmulhu_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vmulhu_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vmulhsu_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vmulhsu_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vmulhsu_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vmulhsu_vx_d, 8, 8, clearq) + +/* Vector Integer Divide Instructions */ +#define DO_DIVU(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) : N / M) +#define DO_REMU(N, M) (unlikely(M == 0) ? N : N % M) +#define DO_DIV(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) :\ + unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M) +#define DO_REM(N, M) (unlikely(M == 0) ? N :\ + unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M) + +RVVCALL(OPIVV2, vdivu_vv_b, OP_UUU_B, H1, H1, H1, DO_DIVU) +RVVCALL(OPIVV2, vdivu_vv_h, OP_UUU_H, H2, H2, H2, DO_DIVU) +RVVCALL(OPIVV2, vdivu_vv_w, OP_UUU_W, H4, H4, H4, DO_DIVU) +RVVCALL(OPIVV2, vdivu_vv_d, OP_UUU_D, H8, H8, H8, DO_DIVU) +RVVCALL(OPIVV2, vdiv_vv_b, OP_SSS_B, H1, H1, H1, DO_DIV) +RVVCALL(OPIVV2, vdiv_vv_h, OP_SSS_H, H2, H2, H2, DO_DIV) +RVVCALL(OPIVV2, vdiv_vv_w, OP_SSS_W, H4, H4, H4, DO_DIV) +RVVCALL(OPIVV2, vdiv_vv_d, OP_SSS_D, H8, H8, H8, DO_DIV) +RVVCALL(OPIVV2, vremu_vv_b, OP_UUU_B, H1, H1, H1, DO_REMU) +RVVCALL(OPIVV2, vremu_vv_h, OP_UUU_H, H2, H2, H2, DO_REMU) +RVVCALL(OPIVV2, vremu_vv_w, OP_UUU_W, H4, H4, H4, DO_REMU) +RVVCALL(OPIVV2, vremu_vv_d, OP_UUU_D, H8, H8, H8, DO_REMU) +RVVCALL(OPIVV2, vrem_vv_b, OP_SSS_B, H1, H1, H1, DO_REM) +RVVCALL(OPIVV2, vrem_vv_h, OP_SSS_H, H2, H2, H2, DO_REM) +RVVCALL(OPIVV2, vrem_vv_w, OP_SSS_W, H4, H4, H4, DO_REM) +RVVCALL(OPIVV2, vrem_vv_d, OP_SSS_D, H8, H8, H8, DO_REM) +GEN_VEXT_VV(vdivu_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vdivu_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vdivu_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vdivu_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vdiv_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vdiv_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vdiv_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vdiv_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vremu_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vremu_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vremu_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vremu_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vrem_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vrem_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vrem_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vrem_vv_d, 8, 8, clearq) + +RVVCALL(OPIVX2, vdivu_vx_b, OP_UUU_B, H1, H1, DO_DIVU) +RVVCALL(OPIVX2, vdivu_vx_h, OP_UUU_H, H2, H2, DO_DIVU) +RVVCALL(OPIVX2, vdivu_vx_w, OP_UUU_W, H4, H4, DO_DIVU) +RVVCALL(OPIVX2, vdivu_vx_d, OP_UUU_D, H8, H8, DO_DIVU) +RVVCALL(OPIVX2, vdiv_vx_b, OP_SSS_B, H1, H1, DO_DIV) +RVVCALL(OPIVX2, vdiv_vx_h, OP_SSS_H, H2, H2, DO_DIV) +RVVCALL(OPIVX2, vdiv_vx_w, OP_SSS_W, H4, H4, DO_DIV) +RVVCALL(OPIVX2, vdiv_vx_d, OP_SSS_D, H8, H8, DO_DIV) +RVVCALL(OPIVX2, vremu_vx_b, OP_UUU_B, H1, H1, DO_REMU) +RVVCALL(OPIVX2, vremu_vx_h, 
OP_UUU_H, H2, H2, DO_REMU) +RVVCALL(OPIVX2, vremu_vx_w, OP_UUU_W, H4, H4, DO_REMU) +RVVCALL(OPIVX2, vremu_vx_d, OP_UUU_D, H8, H8, DO_REMU) +RVVCALL(OPIVX2, vrem_vx_b, OP_SSS_B, H1, H1, DO_REM) +RVVCALL(OPIVX2, vrem_vx_h, OP_SSS_H, H2, H2, DO_REM) +RVVCALL(OPIVX2, vrem_vx_w, OP_SSS_W, H4, H4, DO_REM) +RVVCALL(OPIVX2, vrem_vx_d, OP_SSS_D, H8, H8, DO_REM) +GEN_VEXT_VX(vdivu_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vdivu_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vdivu_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vdivu_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vdiv_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vdiv_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vdiv_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vdiv_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vremu_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vremu_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vremu_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vremu_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vrem_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vrem_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vrem_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vrem_vx_d, 8, 8, clearq) + +/* Vector Widening Integer Multiply Instructions */ +RVVCALL(OPIVV2, vwmul_vv_b, WOP_SSS_B, H2, H1, H1, DO_MUL) +RVVCALL(OPIVV2, vwmul_vv_h, WOP_SSS_H, H4, H2, H2, DO_MUL) +RVVCALL(OPIVV2, vwmul_vv_w, WOP_SSS_W, H8, H4, H4, DO_MUL) +RVVCALL(OPIVV2, vwmulu_vv_b, WOP_UUU_B, H2, H1, H1, DO_MUL) +RVVCALL(OPIVV2, vwmulu_vv_h, WOP_UUU_H, H4, H2, H2, DO_MUL) +RVVCALL(OPIVV2, vwmulu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MUL) +RVVCALL(OPIVV2, vwmulsu_vv_b, WOP_SUS_B, H2, H1, H1, DO_MUL) +RVVCALL(OPIVV2, vwmulsu_vv_h, WOP_SUS_H, H4, H2, H2, DO_MUL) +RVVCALL(OPIVV2, vwmulsu_vv_w, WOP_SUS_W, H8, H4, H4, DO_MUL) +GEN_VEXT_VV(vwmul_vv_b, 1, 2, clearh) +GEN_VEXT_VV(vwmul_vv_h, 2, 4, clearl) +GEN_VEXT_VV(vwmul_vv_w, 4, 8, clearq) +GEN_VEXT_VV(vwmulu_vv_b, 1, 2, clearh) +GEN_VEXT_VV(vwmulu_vv_h, 2, 4, clearl) +GEN_VEXT_VV(vwmulu_vv_w, 4, 8, clearq) +GEN_VEXT_VV(vwmulsu_vv_b, 1, 2, clearh) +GEN_VEXT_VV(vwmulsu_vv_h, 2, 4, clearl) +GEN_VEXT_VV(vwmulsu_vv_w, 4, 8, clearq) + +RVVCALL(OPIVX2, vwmul_vx_b, WOP_SSS_B, H2, H1, DO_MUL) +RVVCALL(OPIVX2, vwmul_vx_h, WOP_SSS_H, H4, H2, DO_MUL) +RVVCALL(OPIVX2, vwmul_vx_w, WOP_SSS_W, H8, H4, DO_MUL) +RVVCALL(OPIVX2, vwmulu_vx_b, WOP_UUU_B, H2, H1, DO_MUL) +RVVCALL(OPIVX2, vwmulu_vx_h, WOP_UUU_H, H4, H2, DO_MUL) +RVVCALL(OPIVX2, vwmulu_vx_w, WOP_UUU_W, H8, H4, DO_MUL) +RVVCALL(OPIVX2, vwmulsu_vx_b, WOP_SUS_B, H2, H1, DO_MUL) +RVVCALL(OPIVX2, vwmulsu_vx_h, WOP_SUS_H, H4, H2, DO_MUL) +RVVCALL(OPIVX2, vwmulsu_vx_w, WOP_SUS_W, H8, H4, DO_MUL) +GEN_VEXT_VX(vwmul_vx_b, 1, 2, clearh) +GEN_VEXT_VX(vwmul_vx_h, 2, 4, clearl) +GEN_VEXT_VX(vwmul_vx_w, 4, 8, clearq) +GEN_VEXT_VX(vwmulu_vx_b, 1, 2, clearh) +GEN_VEXT_VX(vwmulu_vx_h, 2, 4, clearl) +GEN_VEXT_VX(vwmulu_vx_w, 4, 8, clearq) +GEN_VEXT_VX(vwmulsu_vx_b, 1, 2, clearh) +GEN_VEXT_VX(vwmulsu_vx_h, 2, 4, clearl) +GEN_VEXT_VX(vwmulsu_vx_w, 4, 8, clearq) + +/* Vector Single-Width Integer Multiply-Add Instructions */ +#define OPIVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ +static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \ +{ \ + TX1 s1 = *((T1 *)vs1 + HS1(i)); \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + TD d = *((TD *)vd + HD(i)); \ + *((TD *)vd + HD(i)) = OP(s2, s1, d); \ +} + +#define DO_MACC(N, M, D) (M * N + D) +#define DO_NMSAC(N, M, D) (-(M * N) + D) +#define DO_MADD(N, M, D) (M * D + N) +#define DO_NMSUB(N, M, D) (-(M * D) + N) +RVVCALL(OPIVV3, vmacc_vv_b, OP_SSS_B, H1, H1, H1, DO_MACC) +RVVCALL(OPIVV3, vmacc_vv_h, OP_SSS_H, H2, H2, H2, DO_MACC) +RVVCALL(OPIVV3, vmacc_vv_w, OP_SSS_W, H4, H4, H4, DO_MACC) +RVVCALL(OPIVV3, vmacc_vv_d, OP_SSS_D, H8, H8, H8, 
DO_MACC) +RVVCALL(OPIVV3, vnmsac_vv_b, OP_SSS_B, H1, H1, H1, DO_NMSAC) +RVVCALL(OPIVV3, vnmsac_vv_h, OP_SSS_H, H2, H2, H2, DO_NMSAC) +RVVCALL(OPIVV3, vnmsac_vv_w, OP_SSS_W, H4, H4, H4, DO_NMSAC) +RVVCALL(OPIVV3, vnmsac_vv_d, OP_SSS_D, H8, H8, H8, DO_NMSAC) +RVVCALL(OPIVV3, vmadd_vv_b, OP_SSS_B, H1, H1, H1, DO_MADD) +RVVCALL(OPIVV3, vmadd_vv_h, OP_SSS_H, H2, H2, H2, DO_MADD) +RVVCALL(OPIVV3, vmadd_vv_w, OP_SSS_W, H4, H4, H4, DO_MADD) +RVVCALL(OPIVV3, vmadd_vv_d, OP_SSS_D, H8, H8, H8, DO_MADD) +RVVCALL(OPIVV3, vnmsub_vv_b, OP_SSS_B, H1, H1, H1, DO_NMSUB) +RVVCALL(OPIVV3, vnmsub_vv_h, OP_SSS_H, H2, H2, H2, DO_NMSUB) +RVVCALL(OPIVV3, vnmsub_vv_w, OP_SSS_W, H4, H4, H4, DO_NMSUB) +RVVCALL(OPIVV3, vnmsub_vv_d, OP_SSS_D, H8, H8, H8, DO_NMSUB) +GEN_VEXT_VV(vmacc_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vmacc_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vmacc_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vmacc_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vnmsac_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vnmsac_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vnmsac_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vnmsac_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vmadd_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vmadd_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vmadd_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vmadd_vv_d, 8, 8, clearq) +GEN_VEXT_VV(vnmsub_vv_b, 1, 1, clearb) +GEN_VEXT_VV(vnmsub_vv_h, 2, 2, clearh) +GEN_VEXT_VV(vnmsub_vv_w, 4, 4, clearl) +GEN_VEXT_VV(vnmsub_vv_d, 8, 8, clearq) + +#define OPIVX3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ +static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \ +{ \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + TD d = *((TD *)vd + HD(i)); \ + *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, d); \ +} + +RVVCALL(OPIVX3, vmacc_vx_b, OP_SSS_B, H1, H1, DO_MACC) +RVVCALL(OPIVX3, vmacc_vx_h, OP_SSS_H, H2, H2, DO_MACC) +RVVCALL(OPIVX3, vmacc_vx_w, OP_SSS_W, H4, H4, DO_MACC) +RVVCALL(OPIVX3, vmacc_vx_d, OP_SSS_D, H8, H8, DO_MACC) +RVVCALL(OPIVX3, vnmsac_vx_b, OP_SSS_B, H1, H1, DO_NMSAC) +RVVCALL(OPIVX3, vnmsac_vx_h, OP_SSS_H, H2, H2, DO_NMSAC) +RVVCALL(OPIVX3, vnmsac_vx_w, OP_SSS_W, H4, H4, DO_NMSAC) +RVVCALL(OPIVX3, vnmsac_vx_d, OP_SSS_D, H8, H8, DO_NMSAC) +RVVCALL(OPIVX3, vmadd_vx_b, OP_SSS_B, H1, H1, DO_MADD) +RVVCALL(OPIVX3, vmadd_vx_h, OP_SSS_H, H2, H2, DO_MADD) +RVVCALL(OPIVX3, vmadd_vx_w, OP_SSS_W, H4, H4, DO_MADD) +RVVCALL(OPIVX3, vmadd_vx_d, OP_SSS_D, H8, H8, DO_MADD) +RVVCALL(OPIVX3, vnmsub_vx_b, OP_SSS_B, H1, H1, DO_NMSUB) +RVVCALL(OPIVX3, vnmsub_vx_h, OP_SSS_H, H2, H2, DO_NMSUB) +RVVCALL(OPIVX3, vnmsub_vx_w, OP_SSS_W, H4, H4, DO_NMSUB) +RVVCALL(OPIVX3, vnmsub_vx_d, OP_SSS_D, H8, H8, DO_NMSUB) +GEN_VEXT_VX(vmacc_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vmacc_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vmacc_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vmacc_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vnmsac_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vnmsac_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vnmsac_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vnmsac_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vmadd_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vmadd_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vmadd_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vmadd_vx_d, 8, 8, clearq) +GEN_VEXT_VX(vnmsub_vx_b, 1, 1, clearb) +GEN_VEXT_VX(vnmsub_vx_h, 2, 2, clearh) +GEN_VEXT_VX(vnmsub_vx_w, 4, 4, clearl) +GEN_VEXT_VX(vnmsub_vx_d, 8, 8, clearq) + +/* Vector Widening Integer Multiply-Add Instructions */ +RVVCALL(OPIVV3, vwmaccu_vv_b, WOP_UUU_B, H2, H1, H1, DO_MACC) +RVVCALL(OPIVV3, vwmaccu_vv_h, WOP_UUU_H, H4, H2, H2, DO_MACC) +RVVCALL(OPIVV3, vwmaccu_vv_w, WOP_UUU_W, H8, H4, H4, DO_MACC) +RVVCALL(OPIVV3, vwmacc_vv_b, WOP_SSS_B, H2, H1, H1, DO_MACC) +RVVCALL(OPIVV3, vwmacc_vv_h, WOP_SSS_H, H4, H2, H2, DO_MACC) 
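For readers untangling the macro layers in the single-width multiply-add block above, this is roughly what RVVCALL(OPIVX3, vmacc_vx_b, OP_SSS_B, H1, H1, DO_MACC) expands to, written out by hand (a sketch that reuses the file's own H1 and target_long definitions; it is not additional patch code):

    /* OP_SSS_B makes TD/T1/T2/TX1/TX2 all int8_t; H1(i) is the
     * host-endianness element index. */
    static void do_vmacc_vx_b_expanded(void *vd, target_long s1, void *vs2, int i)
    {
        int8_t s2 = *((int8_t *)vs2 + H1(i));   /* element of vs2 */
        int8_t d  = *((int8_t *)vd + H1(i));    /* current accumulator element */

        /* DO_MACC(N, M, D) is M * N + D, i.e. vd[i] = rs1 * vs2[i] + vd[i] */
        *((int8_t *)vd + H1(i)) = (int8_t)s1 * s2 + d;
    }

The per-element function is then wrapped by GEN_VEXT_VX(vmacc_vx_b, 1, 1, clearb), which supplies the masked element loop and tail clearing. Note the operand roles: DO_MACC and DO_NMSAC accumulate into vd, while DO_MADD and DO_NMSUB multiply the vd element by rs1/vs1 and add the vs2 element.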
+RVVCALL(OPIVV3, vwmacc_vv_w, WOP_SSS_W, H8, H4, H4, DO_MACC) +RVVCALL(OPIVV3, vwmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, DO_MACC) +RVVCALL(OPIVV3, vwmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, DO_MACC) +RVVCALL(OPIVV3, vwmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, DO_MACC) +GEN_VEXT_VV(vwmaccu_vv_b, 1, 2, clearh) +GEN_VEXT_VV(vwmaccu_vv_h, 2, 4, clearl) +GEN_VEXT_VV(vwmaccu_vv_w, 4, 8, clearq) +GEN_VEXT_VV(vwmacc_vv_b, 1, 2, clearh) +GEN_VEXT_VV(vwmacc_vv_h, 2, 4, clearl) +GEN_VEXT_VV(vwmacc_vv_w, 4, 8, clearq) +GEN_VEXT_VV(vwmaccsu_vv_b, 1, 2, clearh) +GEN_VEXT_VV(vwmaccsu_vv_h, 2, 4, clearl) +GEN_VEXT_VV(vwmaccsu_vv_w, 4, 8, clearq) + +RVVCALL(OPIVX3, vwmaccu_vx_b, WOP_UUU_B, H2, H1, DO_MACC) +RVVCALL(OPIVX3, vwmaccu_vx_h, WOP_UUU_H, H4, H2, DO_MACC) +RVVCALL(OPIVX3, vwmaccu_vx_w, WOP_UUU_W, H8, H4, DO_MACC) +RVVCALL(OPIVX3, vwmacc_vx_b, WOP_SSS_B, H2, H1, DO_MACC) +RVVCALL(OPIVX3, vwmacc_vx_h, WOP_SSS_H, H4, H2, DO_MACC) +RVVCALL(OPIVX3, vwmacc_vx_w, WOP_SSS_W, H8, H4, DO_MACC) +RVVCALL(OPIVX3, vwmaccsu_vx_b, WOP_SSU_B, H2, H1, DO_MACC) +RVVCALL(OPIVX3, vwmaccsu_vx_h, WOP_SSU_H, H4, H2, DO_MACC) +RVVCALL(OPIVX3, vwmaccsu_vx_w, WOP_SSU_W, H8, H4, DO_MACC) +RVVCALL(OPIVX3, vwmaccus_vx_b, WOP_SUS_B, H2, H1, DO_MACC) +RVVCALL(OPIVX3, vwmaccus_vx_h, WOP_SUS_H, H4, H2, DO_MACC) +RVVCALL(OPIVX3, vwmaccus_vx_w, WOP_SUS_W, H8, H4, DO_MACC) +GEN_VEXT_VX(vwmaccu_vx_b, 1, 2, clearh) +GEN_VEXT_VX(vwmaccu_vx_h, 2, 4, clearl) +GEN_VEXT_VX(vwmaccu_vx_w, 4, 8, clearq) +GEN_VEXT_VX(vwmacc_vx_b, 1, 2, clearh) +GEN_VEXT_VX(vwmacc_vx_h, 2, 4, clearl) +GEN_VEXT_VX(vwmacc_vx_w, 4, 8, clearq) +GEN_VEXT_VX(vwmaccsu_vx_b, 1, 2, clearh) +GEN_VEXT_VX(vwmaccsu_vx_h, 2, 4, clearl) +GEN_VEXT_VX(vwmaccsu_vx_w, 4, 8, clearq) +GEN_VEXT_VX(vwmaccus_vx_b, 1, 2, clearh) +GEN_VEXT_VX(vwmaccus_vx_h, 2, 4, clearl) +GEN_VEXT_VX(vwmaccus_vx_w, 4, 8, clearq) + +/* Vector Integer Merge and Move Instructions */ +#define GEN_VEXT_VMV_VV(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ + *((ETYPE *)vd + H(i)) = s1; \ + } \ + CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ +} + +GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t, H1, clearb) +GEN_VEXT_VMV_VV(vmv_v_v_h, int16_t, H2, clearh) +GEN_VEXT_VMV_VV(vmv_v_v_w, int32_t, H4, clearl) +GEN_VEXT_VMV_VV(vmv_v_v_d, int64_t, H8, clearq) + +#define GEN_VEXT_VMV_VX(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + *((ETYPE *)vd + H(i)) = (ETYPE)s1; \ + } \ + CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ +} + +GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t, H1, clearb) +GEN_VEXT_VMV_VX(vmv_v_x_h, int16_t, H2, clearh) +GEN_VEXT_VMV_VX(vmv_v_x_w, int32_t, H4, clearl) +GEN_VEXT_VMV_VX(vmv_v_x_d, int64_t, H8, clearq) + +#define GEN_VEXT_VMERGE_VV(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + ETYPE *vt = (!vext_elem_mask(v0, mlen, i) ? 
vs2 : vs1); \ + *((ETYPE *)vd + H(i)) = *(vt + H(i)); \ + } \ + CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ +} + +GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t, H1, clearb) +GEN_VEXT_VMERGE_VV(vmerge_vvm_h, int16_t, H2, clearh) +GEN_VEXT_VMERGE_VV(vmerge_vvm_w, int32_t, H4, clearl) +GEN_VEXT_VMERGE_VV(vmerge_vvm_d, int64_t, H8, clearq) + +#define GEN_VEXT_VMERGE_VX(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ + void *vs2, CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ + ETYPE d = (!vext_elem_mask(v0, mlen, i) ? s2 : \ + (ETYPE)(target_long)s1); \ + *((ETYPE *)vd + H(i)) = d; \ + } \ + CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ +} + +GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t, H1, clearb) +GEN_VEXT_VMERGE_VX(vmerge_vxm_h, int16_t, H2, clearh) +GEN_VEXT_VMERGE_VX(vmerge_vxm_w, int32_t, H4, clearl) +GEN_VEXT_VMERGE_VX(vmerge_vxm_d, int64_t, H8, clearq) + +/* + *** Vector Fixed-Point Arithmetic Instructions + */ + +/* Vector Single-Width Saturating Add and Subtract */ + +/* + * As fixed point instructions probably have round mode and saturation, + * define common macros for fixed point here. + */ +typedef void opivv2_rm_fn(void *vd, void *vs1, void *vs2, int i, + CPURISCVState *env, int vxrm); + +#define OPIVV2_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ +static inline void \ +do_##NAME(void *vd, void *vs1, void *vs2, int i, \ + CPURISCVState *env, int vxrm) \ +{ \ + TX1 s1 = *((T1 *)vs1 + HS1(i)); \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + *((TD *)vd + HD(i)) = OP(env, vxrm, s2, s1); \ +} + +static inline void +vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2, + CPURISCVState *env, + uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm, + opivv2_rm_fn *fn) +{ + for (uint32_t i = 0; i < vl; i++) { + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + fn(vd, vs1, vs2, i, env, vxrm); + } +} + +static inline void +vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2, + CPURISCVState *env, + uint32_t desc, uint32_t esz, uint32_t dsz, + opivv2_rm_fn *fn, clear_fn *clearfn) +{ + uint32_t vlmax = vext_maxsz(desc) / esz; + uint32_t mlen = vext_mlen(desc); + uint32_t vm = vext_vm(desc); + uint32_t vl = env->vl; + + switch (env->vxrm) { + case 0: /* rnu */ + vext_vv_rm_1(vd, v0, vs1, vs2, + env, vl, vm, mlen, 0, fn); + break; + case 1: /* rne */ + vext_vv_rm_1(vd, v0, vs1, vs2, + env, vl, vm, mlen, 1, fn); + break; + case 2: /* rdn */ + vext_vv_rm_1(vd, v0, vs1, vs2, + env, vl, vm, mlen, 2, fn); + break; + default: /* rod */ + vext_vv_rm_1(vd, v0, vs1, vs2, + env, vl, vm, mlen, 3, fn); + break; + } + + clearfn(vd, vl, vl * dsz, vlmax * dsz); +} + +/* generate helpers for fixed point instructions with OPIVV format */ +#define GEN_VEXT_VV_RM(NAME, ESZ, DSZ, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vext_vv_rm_2(vd, v0, vs1, vs2, env, desc, ESZ, DSZ, \ + do_##NAME, CLEAR_FN); \ +} + +static inline uint8_t saddu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b) +{ + uint8_t res = a + b; + if (res < a) { + res = UINT8_MAX; + env->vxsat = 0x1; + } + return res; +} + +static inline uint16_t saddu16(CPURISCVState *env, int vxrm, uint16_t a, + uint16_t b) +{ + uint16_t res = a + b; + if (res < a) { + res = UINT16_MAX; + env->vxsat = 0x1; + } + return res; +} + 
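The saturating helpers above all rely on the same wraparound test: for unsigned addition, overflow has occurred exactly when the truncated result compares smaller than an operand, in which case the result is clamped and vxsat is set. A self-contained sketch of that check (hypothetical demo code, with a plain flag standing in for env->vxsat):

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors saddu8: clamp on wraparound and report saturation. */
    static uint8_t saddu8_demo(uint8_t a, uint8_t b, int *sat)
    {
        uint8_t res = a + b;     /* wraps modulo 2^8 on overflow */

        if (res < a) {           /* wrapped, so the true sum did not fit */
            res = UINT8_MAX;
            *sat = 1;
        }
        return res;
    }

    int main(void)
    {
        int sat = 0;

        assert(saddu8_demo(200, 100, &sat) == UINT8_MAX && sat == 1);
        sat = 0;
        assert(saddu8_demo(100, 100, &sat) == 200 && sat == 0);
        return 0;
    }

Note that these helpers only ever set vxsat, never clear it, so it accumulates as a sticky flag across elements. The rnu/rne/rdn/rod switch in vext_vv_rm_2 and vext_vx_rm_2 forwards vxrm as a literal constant, presumably so the inlined per-element loop can specialize on the rounding mode.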
+static inline uint32_t saddu32(CPURISCVState *env, int vxrm, uint32_t a, + uint32_t b) +{ + uint32_t res = a + b; + if (res < a) { + res = UINT32_MAX; + env->vxsat = 0x1; + } + return res; +} + +static inline uint64_t saddu64(CPURISCVState *env, int vxrm, uint64_t a, + uint64_t b) +{ + uint64_t res = a + b; + if (res < a) { + res = UINT64_MAX; + env->vxsat = 0x1; + } + return res; +} + +RVVCALL(OPIVV2_RM, vsaddu_vv_b, OP_UUU_B, H1, H1, H1, saddu8) +RVVCALL(OPIVV2_RM, vsaddu_vv_h, OP_UUU_H, H2, H2, H2, saddu16) +RVVCALL(OPIVV2_RM, vsaddu_vv_w, OP_UUU_W, H4, H4, H4, saddu32) +RVVCALL(OPIVV2_RM, vsaddu_vv_d, OP_UUU_D, H8, H8, H8, saddu64) +GEN_VEXT_VV_RM(vsaddu_vv_b, 1, 1, clearb) +GEN_VEXT_VV_RM(vsaddu_vv_h, 2, 2, clearh) +GEN_VEXT_VV_RM(vsaddu_vv_w, 4, 4, clearl) +GEN_VEXT_VV_RM(vsaddu_vv_d, 8, 8, clearq) + +typedef void opivx2_rm_fn(void *vd, target_long s1, void *vs2, int i, + CPURISCVState *env, int vxrm); + +#define OPIVX2_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ +static inline void \ +do_##NAME(void *vd, target_long s1, void *vs2, int i, \ + CPURISCVState *env, int vxrm) \ +{ \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + *((TD *)vd + HD(i)) = OP(env, vxrm, s2, (TX1)(T1)s1); \ +} + +static inline void +vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2, + CPURISCVState *env, + uint32_t vl, uint32_t vm, uint32_t mlen, int vxrm, + opivx2_rm_fn *fn) +{ + for (uint32_t i = 0; i < vl; i++) { + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + fn(vd, s1, vs2, i, env, vxrm); + } +} + +static inline void +vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2, + CPURISCVState *env, + uint32_t desc, uint32_t esz, uint32_t dsz, + opivx2_rm_fn *fn, clear_fn *clearfn) +{ + uint32_t vlmax = vext_maxsz(desc) / esz; + uint32_t mlen = vext_mlen(desc); + uint32_t vm = vext_vm(desc); + uint32_t vl = env->vl; + + switch (env->vxrm) { + case 0: /* rnu */ + vext_vx_rm_1(vd, v0, s1, vs2, + env, vl, vm, mlen, 0, fn); + break; + case 1: /* rne */ + vext_vx_rm_1(vd, v0, s1, vs2, + env, vl, vm, mlen, 1, fn); + break; + case 2: /* rdn */ + vext_vx_rm_1(vd, v0, s1, vs2, + env, vl, vm, mlen, 2, fn); + break; + default: /* rod */ + vext_vx_rm_1(vd, v0, s1, vs2, + env, vl, vm, mlen, 3, fn); + break; + } + + clearfn(vd, vl, vl * dsz, vlmax * dsz); +} + +/* generate helpers for fixed point instructions with OPIVX format */ +#define GEN_VEXT_VX_RM(NAME, ESZ, DSZ, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \ + void *vs2, CPURISCVState *env, uint32_t desc) \ +{ \ + vext_vx_rm_2(vd, v0, s1, vs2, env, desc, ESZ, DSZ, \ + do_##NAME, CLEAR_FN); \ +} + +RVVCALL(OPIVX2_RM, vsaddu_vx_b, OP_UUU_B, H1, H1, saddu8) +RVVCALL(OPIVX2_RM, vsaddu_vx_h, OP_UUU_H, H2, H2, saddu16) +RVVCALL(OPIVX2_RM, vsaddu_vx_w, OP_UUU_W, H4, H4, saddu32) +RVVCALL(OPIVX2_RM, vsaddu_vx_d, OP_UUU_D, H8, H8, saddu64) +GEN_VEXT_VX_RM(vsaddu_vx_b, 1, 1, clearb) +GEN_VEXT_VX_RM(vsaddu_vx_h, 2, 2, clearh) +GEN_VEXT_VX_RM(vsaddu_vx_w, 4, 4, clearl) +GEN_VEXT_VX_RM(vsaddu_vx_d, 8, 8, clearq) + +static inline int8_t sadd8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) +{ + int8_t res = a + b; + if ((res ^ a) & (res ^ b) & INT8_MIN) { + res = a > 0 ? INT8_MAX : INT8_MIN; + env->vxsat = 0x1; + } + return res; +} + +static inline int16_t sadd16(CPURISCVState *env, int vxrm, int16_t a, int16_t b) +{ + int16_t res = a + b; + if ((res ^ a) & (res ^ b) & INT16_MIN) { + res = a > 0 ? 
INT16_MAX : INT16_MIN; + env->vxsat = 0x1; + } + return res; +} + +static inline int32_t sadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) +{ + int32_t res = a + b; + if ((res ^ a) & (res ^ b) & INT32_MIN) { + res = a > 0 ? INT32_MAX : INT32_MIN; + env->vxsat = 0x1; + } + return res; +} + +static inline int64_t sadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) +{ + int64_t res = a + b; + if ((res ^ a) & (res ^ b) & INT64_MIN) { + res = a > 0 ? INT64_MAX : INT64_MIN; + env->vxsat = 0x1; + } + return res; +} + +RVVCALL(OPIVV2_RM, vsadd_vv_b, OP_SSS_B, H1, H1, H1, sadd8) +RVVCALL(OPIVV2_RM, vsadd_vv_h, OP_SSS_H, H2, H2, H2, sadd16) +RVVCALL(OPIVV2_RM, vsadd_vv_w, OP_SSS_W, H4, H4, H4, sadd32) +RVVCALL(OPIVV2_RM, vsadd_vv_d, OP_SSS_D, H8, H8, H8, sadd64) +GEN_VEXT_VV_RM(vsadd_vv_b, 1, 1, clearb) +GEN_VEXT_VV_RM(vsadd_vv_h, 2, 2, clearh) +GEN_VEXT_VV_RM(vsadd_vv_w, 4, 4, clearl) +GEN_VEXT_VV_RM(vsadd_vv_d, 8, 8, clearq) + +RVVCALL(OPIVX2_RM, vsadd_vx_b, OP_SSS_B, H1, H1, sadd8) +RVVCALL(OPIVX2_RM, vsadd_vx_h, OP_SSS_H, H2, H2, sadd16) +RVVCALL(OPIVX2_RM, vsadd_vx_w, OP_SSS_W, H4, H4, sadd32) +RVVCALL(OPIVX2_RM, vsadd_vx_d, OP_SSS_D, H8, H8, sadd64) +GEN_VEXT_VX_RM(vsadd_vx_b, 1, 1, clearb) +GEN_VEXT_VX_RM(vsadd_vx_h, 2, 2, clearh) +GEN_VEXT_VX_RM(vsadd_vx_w, 4, 4, clearl) +GEN_VEXT_VX_RM(vsadd_vx_d, 8, 8, clearq) + +static inline uint8_t ssubu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b) +{ + uint8_t res = a - b; + if (res > a) { + res = 0; + env->vxsat = 0x1; + } + return res; +} + +static inline uint16_t ssubu16(CPURISCVState *env, int vxrm, uint16_t a, + uint16_t b) +{ + uint16_t res = a - b; + if (res > a) { + res = 0; + env->vxsat = 0x1; + } + return res; +} + +static inline uint32_t ssubu32(CPURISCVState *env, int vxrm, uint32_t a, + uint32_t b) +{ + uint32_t res = a - b; + if (res > a) { + res = 0; + env->vxsat = 0x1; + } + return res; +} + +static inline uint64_t ssubu64(CPURISCVState *env, int vxrm, uint64_t a, + uint64_t b) +{ + uint64_t res = a - b; + if (res > a) { + res = 0; + env->vxsat = 0x1; + } + return res; +} + +RVVCALL(OPIVV2_RM, vssubu_vv_b, OP_UUU_B, H1, H1, H1, ssubu8) +RVVCALL(OPIVV2_RM, vssubu_vv_h, OP_UUU_H, H2, H2, H2, ssubu16) +RVVCALL(OPIVV2_RM, vssubu_vv_w, OP_UUU_W, H4, H4, H4, ssubu32) +RVVCALL(OPIVV2_RM, vssubu_vv_d, OP_UUU_D, H8, H8, H8, ssubu64) +GEN_VEXT_VV_RM(vssubu_vv_b, 1, 1, clearb) +GEN_VEXT_VV_RM(vssubu_vv_h, 2, 2, clearh) +GEN_VEXT_VV_RM(vssubu_vv_w, 4, 4, clearl) +GEN_VEXT_VV_RM(vssubu_vv_d, 8, 8, clearq) + +RVVCALL(OPIVX2_RM, vssubu_vx_b, OP_UUU_B, H1, H1, ssubu8) +RVVCALL(OPIVX2_RM, vssubu_vx_h, OP_UUU_H, H2, H2, ssubu16) +RVVCALL(OPIVX2_RM, vssubu_vx_w, OP_UUU_W, H4, H4, ssubu32) +RVVCALL(OPIVX2_RM, vssubu_vx_d, OP_UUU_D, H8, H8, ssubu64) +GEN_VEXT_VX_RM(vssubu_vx_b, 1, 1, clearb) +GEN_VEXT_VX_RM(vssubu_vx_h, 2, 2, clearh) +GEN_VEXT_VX_RM(vssubu_vx_w, 4, 4, clearl) +GEN_VEXT_VX_RM(vssubu_vx_d, 8, 8, clearq) + +static inline int8_t ssub8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) +{ + int8_t res = a - b; + if ((res ^ a) & (a ^ b) & INT8_MIN) { + res = a > 0 ? INT8_MAX : INT8_MIN; + env->vxsat = 0x1; + } + return res; +} + +static inline int16_t ssub16(CPURISCVState *env, int vxrm, int16_t a, int16_t b) +{ + int16_t res = a - b; + if ((res ^ a) & (a ^ b) & INT16_MIN) { + res = a > 0 ? INT16_MAX : INT16_MIN; + env->vxsat = 0x1; + } + return res; +} + +static inline int32_t ssub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) +{ + int32_t res = a - b; + if ((res ^ a) & (a ^ b) & INT32_MIN) { + res = a > 0 ? 
INT32_MAX : INT32_MIN; + env->vxsat = 0x1; + } + return res; +} + +static inline int64_t ssub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) +{ + int64_t res = a - b; + if ((res ^ a) & (a ^ b) & INT64_MIN) { + res = a > 0 ? INT64_MAX : INT64_MIN; + env->vxsat = 0x1; + } + return res; +} + +RVVCALL(OPIVV2_RM, vssub_vv_b, OP_SSS_B, H1, H1, H1, ssub8) +RVVCALL(OPIVV2_RM, vssub_vv_h, OP_SSS_H, H2, H2, H2, ssub16) +RVVCALL(OPIVV2_RM, vssub_vv_w, OP_SSS_W, H4, H4, H4, ssub32) +RVVCALL(OPIVV2_RM, vssub_vv_d, OP_SSS_D, H8, H8, H8, ssub64) +GEN_VEXT_VV_RM(vssub_vv_b, 1, 1, clearb) +GEN_VEXT_VV_RM(vssub_vv_h, 2, 2, clearh) +GEN_VEXT_VV_RM(vssub_vv_w, 4, 4, clearl) +GEN_VEXT_VV_RM(vssub_vv_d, 8, 8, clearq) + +RVVCALL(OPIVX2_RM, vssub_vx_b, OP_SSS_B, H1, H1, ssub8) +RVVCALL(OPIVX2_RM, vssub_vx_h, OP_SSS_H, H2, H2, ssub16) +RVVCALL(OPIVX2_RM, vssub_vx_w, OP_SSS_W, H4, H4, ssub32) +RVVCALL(OPIVX2_RM, vssub_vx_d, OP_SSS_D, H8, H8, ssub64) +GEN_VEXT_VX_RM(vssub_vx_b, 1, 1, clearb) +GEN_VEXT_VX_RM(vssub_vx_h, 2, 2, clearh) +GEN_VEXT_VX_RM(vssub_vx_w, 4, 4, clearl) +GEN_VEXT_VX_RM(vssub_vx_d, 8, 8, clearq) + +/* Vector Single-Width Averaging Add and Subtract */ +static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift) +{ + uint8_t d = extract64(v, shift, 1); + uint8_t d1; + uint64_t D1, D2; + + if (shift == 0 || shift > 64) { + return 0; + } + + d1 = extract64(v, shift - 1, 1); + D1 = extract64(v, 0, shift); + if (vxrm == 0) { /* round-to-nearest-up (add +0.5 LSB) */ + return d1; + } else if (vxrm == 1) { /* round-to-nearest-even */ + if (shift > 1) { + D2 = extract64(v, 0, shift - 1); + return d1 & ((D2 != 0) | d); + } else { + return d1 & d; + } + } else if (vxrm == 3) { /* round-to-odd (OR bits into LSB, aka "jam") */ + return !d & (D1 != 0); + } + return 0; /* round-down (truncate) */ +} + +static inline int32_t aadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) +{ + int64_t res = (int64_t)a + b; + uint8_t round = get_round(vxrm, res, 1); + + return (res >> 1) + round; +} + +static inline int64_t aadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) +{ + int64_t res = a + b; + uint8_t round = get_round(vxrm, res, 1); + int64_t over = (res ^ a) & (res ^ b) & INT64_MIN; + + /* With signed overflow, bit 64 is inverse of bit 63. 
*/ + return ((res >> 1) ^ over) + round; +} + +RVVCALL(OPIVV2_RM, vaadd_vv_b, OP_SSS_B, H1, H1, H1, aadd32) +RVVCALL(OPIVV2_RM, vaadd_vv_h, OP_SSS_H, H2, H2, H2, aadd32) +RVVCALL(OPIVV2_RM, vaadd_vv_w, OP_SSS_W, H4, H4, H4, aadd32) +RVVCALL(OPIVV2_RM, vaadd_vv_d, OP_SSS_D, H8, H8, H8, aadd64) +GEN_VEXT_VV_RM(vaadd_vv_b, 1, 1, clearb) +GEN_VEXT_VV_RM(vaadd_vv_h, 2, 2, clearh) +GEN_VEXT_VV_RM(vaadd_vv_w, 4, 4, clearl) +GEN_VEXT_VV_RM(vaadd_vv_d, 8, 8, clearq) + +RVVCALL(OPIVX2_RM, vaadd_vx_b, OP_SSS_B, H1, H1, aadd32) +RVVCALL(OPIVX2_RM, vaadd_vx_h, OP_SSS_H, H2, H2, aadd32) +RVVCALL(OPIVX2_RM, vaadd_vx_w, OP_SSS_W, H4, H4, aadd32) +RVVCALL(OPIVX2_RM, vaadd_vx_d, OP_SSS_D, H8, H8, aadd64) +GEN_VEXT_VX_RM(vaadd_vx_b, 1, 1, clearb) +GEN_VEXT_VX_RM(vaadd_vx_h, 2, 2, clearh) +GEN_VEXT_VX_RM(vaadd_vx_w, 4, 4, clearl) +GEN_VEXT_VX_RM(vaadd_vx_d, 8, 8, clearq) + +static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) +{ + int64_t res = (int64_t)a - b; + uint8_t round = get_round(vxrm, res, 1); + + return (res >> 1) + round; +} + +static inline int64_t asub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) +{ + int64_t res = (int64_t)a - b; + uint8_t round = get_round(vxrm, res, 1); + int64_t over = (res ^ a) & (a ^ b) & INT64_MIN; + + /* With signed overflow, bit 64 is inverse of bit 63. */ + return ((res >> 1) ^ over) + round; +} + +RVVCALL(OPIVV2_RM, vasub_vv_b, OP_SSS_B, H1, H1, H1, asub32) +RVVCALL(OPIVV2_RM, vasub_vv_h, OP_SSS_H, H2, H2, H2, asub32) +RVVCALL(OPIVV2_RM, vasub_vv_w, OP_SSS_W, H4, H4, H4, asub32) +RVVCALL(OPIVV2_RM, vasub_vv_d, OP_SSS_D, H8, H8, H8, asub64) +GEN_VEXT_VV_RM(vasub_vv_b, 1, 1, clearb) +GEN_VEXT_VV_RM(vasub_vv_h, 2, 2, clearh) +GEN_VEXT_VV_RM(vasub_vv_w, 4, 4, clearl) +GEN_VEXT_VV_RM(vasub_vv_d, 8, 8, clearq) + +RVVCALL(OPIVX2_RM, vasub_vx_b, OP_SSS_B, H1, H1, asub32) +RVVCALL(OPIVX2_RM, vasub_vx_h, OP_SSS_H, H2, H2, asub32) +RVVCALL(OPIVX2_RM, vasub_vx_w, OP_SSS_W, H4, H4, asub32) +RVVCALL(OPIVX2_RM, vasub_vx_d, OP_SSS_D, H8, H8, asub64) +GEN_VEXT_VX_RM(vasub_vx_b, 1, 1, clearb) +GEN_VEXT_VX_RM(vasub_vx_h, 2, 2, clearh) +GEN_VEXT_VX_RM(vasub_vx_w, 4, 4, clearl) +GEN_VEXT_VX_RM(vasub_vx_d, 8, 8, clearq) + +/* Vector Single-Width Fractional Multiply with Rounding and Saturation */ +static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) +{ + uint8_t round; + int16_t res; + + res = (int16_t)a * (int16_t)b; + round = get_round(vxrm, res, 7); + res = (res >> 7) + round; + + if (res > INT8_MAX) { + env->vxsat = 0x1; + return INT8_MAX; + } else if (res < INT8_MIN) { + env->vxsat = 0x1; + return INT8_MIN; + } else { + return res; + } +} + +static int16_t vsmul16(CPURISCVState *env, int vxrm, int16_t a, int16_t b) +{ + uint8_t round; + int32_t res; + + res = (int32_t)a * (int32_t)b; + round = get_round(vxrm, res, 15); + res = (res >> 15) + round; + + if (res > INT16_MAX) { + env->vxsat = 0x1; + return INT16_MAX; + } else if (res < INT16_MIN) { + env->vxsat = 0x1; + return INT16_MIN; + } else { + return res; + } +} + +static int32_t vsmul32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) +{ + uint8_t round; + int64_t res; + + res = (int64_t)a * (int64_t)b; + round = get_round(vxrm, res, 31); + res = (res >> 31) + round; + + if (res > INT32_MAX) { + env->vxsat = 0x1; + return INT32_MAX; + } else if (res < INT32_MIN) { + env->vxsat = 0x1; + return INT32_MIN; + } else { + return res; + } +} + +static int64_t vsmul64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) +{ + uint8_t round; + uint64_t hi_64, lo_64; + int64_t 
res; + + if (a == INT64_MIN && b == INT64_MIN) { + env->vxsat = 1; + return INT64_MAX; + } + + muls64(&lo_64, &hi_64, a, b); + round = get_round(vxrm, lo_64, 63); + /* + * Cannot overflow, as there are always + * 2 sign bits after multiply. + */ + res = (hi_64 << 1) | (lo_64 >> 63); + if (round) { + if (res == INT64_MAX) { + env->vxsat = 1; + } else { + res += 1; + } + } + return res; +} + +RVVCALL(OPIVV2_RM, vsmul_vv_b, OP_SSS_B, H1, H1, H1, vsmul8) +RVVCALL(OPIVV2_RM, vsmul_vv_h, OP_SSS_H, H2, H2, H2, vsmul16) +RVVCALL(OPIVV2_RM, vsmul_vv_w, OP_SSS_W, H4, H4, H4, vsmul32) +RVVCALL(OPIVV2_RM, vsmul_vv_d, OP_SSS_D, H8, H8, H8, vsmul64) +GEN_VEXT_VV_RM(vsmul_vv_b, 1, 1, clearb) +GEN_VEXT_VV_RM(vsmul_vv_h, 2, 2, clearh) +GEN_VEXT_VV_RM(vsmul_vv_w, 4, 4, clearl) +GEN_VEXT_VV_RM(vsmul_vv_d, 8, 8, clearq) + +RVVCALL(OPIVX2_RM, vsmul_vx_b, OP_SSS_B, H1, H1, vsmul8) +RVVCALL(OPIVX2_RM, vsmul_vx_h, OP_SSS_H, H2, H2, vsmul16) +RVVCALL(OPIVX2_RM, vsmul_vx_w, OP_SSS_W, H4, H4, vsmul32) +RVVCALL(OPIVX2_RM, vsmul_vx_d, OP_SSS_D, H8, H8, vsmul64) +GEN_VEXT_VX_RM(vsmul_vx_b, 1, 1, clearb) +GEN_VEXT_VX_RM(vsmul_vx_h, 2, 2, clearh) +GEN_VEXT_VX_RM(vsmul_vx_w, 4, 4, clearl) +GEN_VEXT_VX_RM(vsmul_vx_d, 8, 8, clearq) + +/* Vector Widening Saturating Scaled Multiply-Add */ +static inline uint16_t +vwsmaccu8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b, + uint16_t c) +{ + uint8_t round; + uint16_t res = (uint16_t)a * b; + + round = get_round(vxrm, res, 4); + res = (res >> 4) + round; + return saddu16(env, vxrm, c, res); +} + +static inline uint32_t +vwsmaccu16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b, + uint32_t c) +{ + uint8_t round; + uint32_t res = (uint32_t)a * b; + + round = get_round(vxrm, res, 8); + res = (res >> 8) + round; + return saddu32(env, vxrm, c, res); +} + +static inline uint64_t +vwsmaccu32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b, + uint64_t c) +{ + uint8_t round; + uint64_t res = (uint64_t)a * b; + + round = get_round(vxrm, res, 16); + res = (res >> 16) + round; + return saddu64(env, vxrm, c, res); +} + +#define OPIVV3_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ +static inline void \ +do_##NAME(void *vd, void *vs1, void *vs2, int i, \ + CPURISCVState *env, int vxrm) \ +{ \ + TX1 s1 = *((T1 *)vs1 + HS1(i)); \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + TD d = *((TD *)vd + HD(i)); \ + *((TD *)vd + HD(i)) = OP(env, vxrm, s2, s1, d); \ +} + +RVVCALL(OPIVV3_RM, vwsmaccu_vv_b, WOP_UUU_B, H2, H1, H1, vwsmaccu8) +RVVCALL(OPIVV3_RM, vwsmaccu_vv_h, WOP_UUU_H, H4, H2, H2, vwsmaccu16) +RVVCALL(OPIVV3_RM, vwsmaccu_vv_w, WOP_UUU_W, H8, H4, H4, vwsmaccu32) +GEN_VEXT_VV_RM(vwsmaccu_vv_b, 1, 2, clearh) +GEN_VEXT_VV_RM(vwsmaccu_vv_h, 2, 4, clearl) +GEN_VEXT_VV_RM(vwsmaccu_vv_w, 4, 8, clearq) + +#define OPIVX3_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ +static inline void \ +do_##NAME(void *vd, target_long s1, void *vs2, int i, \ + CPURISCVState *env, int vxrm) \ +{ \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + TD d = *((TD *)vd + HD(i)); \ + *((TD *)vd + HD(i)) = OP(env, vxrm, s2, (TX1)(T1)s1, d); \ +} + +RVVCALL(OPIVX3_RM, vwsmaccu_vx_b, WOP_UUU_B, H2, H1, vwsmaccu8) +RVVCALL(OPIVX3_RM, vwsmaccu_vx_h, WOP_UUU_H, H4, H2, vwsmaccu16) +RVVCALL(OPIVX3_RM, vwsmaccu_vx_w, WOP_UUU_W, H8, H4, vwsmaccu32) +GEN_VEXT_VX_RM(vwsmaccu_vx_b, 1, 2, clearh) +GEN_VEXT_VX_RM(vwsmaccu_vx_h, 2, 4, clearl) +GEN_VEXT_VX_RM(vwsmaccu_vx_w, 4, 8, clearq) + +static inline int16_t +vwsmacc8(CPURISCVState *env, int vxrm, int8_t a, int8_t b, int16_t c) +{ + uint8_t round; + int16_t res = (int16_t)a * b; + + 
round = get_round(vxrm, res, 4); + res = (res >> 4) + round; + return sadd16(env, vxrm, c, res); +} + +static inline int32_t +vwsmacc16(CPURISCVState *env, int vxrm, int16_t a, int16_t b, int32_t c) +{ + uint8_t round; + int32_t res = (int32_t)a * b; + + round = get_round(vxrm, res, 8); + res = (res >> 8) + round; + return sadd32(env, vxrm, c, res); + +} + +static inline int64_t +vwsmacc32(CPURISCVState *env, int vxrm, int32_t a, int32_t b, int64_t c) +{ + uint8_t round; + int64_t res = (int64_t)a * b; + + round = get_round(vxrm, res, 16); + res = (res >> 16) + round; + return sadd64(env, vxrm, c, res); +} + +RVVCALL(OPIVV3_RM, vwsmacc_vv_b, WOP_SSS_B, H2, H1, H1, vwsmacc8) +RVVCALL(OPIVV3_RM, vwsmacc_vv_h, WOP_SSS_H, H4, H2, H2, vwsmacc16) +RVVCALL(OPIVV3_RM, vwsmacc_vv_w, WOP_SSS_W, H8, H4, H4, vwsmacc32) +GEN_VEXT_VV_RM(vwsmacc_vv_b, 1, 2, clearh) +GEN_VEXT_VV_RM(vwsmacc_vv_h, 2, 4, clearl) +GEN_VEXT_VV_RM(vwsmacc_vv_w, 4, 8, clearq) +RVVCALL(OPIVX3_RM, vwsmacc_vx_b, WOP_SSS_B, H2, H1, vwsmacc8) +RVVCALL(OPIVX3_RM, vwsmacc_vx_h, WOP_SSS_H, H4, H2, vwsmacc16) +RVVCALL(OPIVX3_RM, vwsmacc_vx_w, WOP_SSS_W, H8, H4, vwsmacc32) +GEN_VEXT_VX_RM(vwsmacc_vx_b, 1, 2, clearh) +GEN_VEXT_VX_RM(vwsmacc_vx_h, 2, 4, clearl) +GEN_VEXT_VX_RM(vwsmacc_vx_w, 4, 8, clearq) + +static inline int16_t +vwsmaccsu8(CPURISCVState *env, int vxrm, uint8_t a, int8_t b, int16_t c) +{ + uint8_t round; + int16_t res = a * (int16_t)b; + + round = get_round(vxrm, res, 4); + res = (res >> 4) + round; + return ssub16(env, vxrm, c, res); +} + +static inline int32_t +vwsmaccsu16(CPURISCVState *env, int vxrm, uint16_t a, int16_t b, uint32_t c) +{ + uint8_t round; + int32_t res = a * (int32_t)b; + + round = get_round(vxrm, res, 8); + res = (res >> 8) + round; + return ssub32(env, vxrm, c, res); +} + +static inline int64_t +vwsmaccsu32(CPURISCVState *env, int vxrm, uint32_t a, int32_t b, int64_t c) +{ + uint8_t round; + int64_t res = a * (int64_t)b; + + round = get_round(vxrm, res, 16); + res = (res >> 16) + round; + return ssub64(env, vxrm, c, res); +} + +RVVCALL(OPIVV3_RM, vwsmaccsu_vv_b, WOP_SSU_B, H2, H1, H1, vwsmaccsu8) +RVVCALL(OPIVV3_RM, vwsmaccsu_vv_h, WOP_SSU_H, H4, H2, H2, vwsmaccsu16) +RVVCALL(OPIVV3_RM, vwsmaccsu_vv_w, WOP_SSU_W, H8, H4, H4, vwsmaccsu32) +GEN_VEXT_VV_RM(vwsmaccsu_vv_b, 1, 2, clearh) +GEN_VEXT_VV_RM(vwsmaccsu_vv_h, 2, 4, clearl) +GEN_VEXT_VV_RM(vwsmaccsu_vv_w, 4, 8, clearq) +RVVCALL(OPIVX3_RM, vwsmaccsu_vx_b, WOP_SSU_B, H2, H1, vwsmaccsu8) +RVVCALL(OPIVX3_RM, vwsmaccsu_vx_h, WOP_SSU_H, H4, H2, vwsmaccsu16) +RVVCALL(OPIVX3_RM, vwsmaccsu_vx_w, WOP_SSU_W, H8, H4, vwsmaccsu32) +GEN_VEXT_VX_RM(vwsmaccsu_vx_b, 1, 2, clearh) +GEN_VEXT_VX_RM(vwsmaccsu_vx_h, 2, 4, clearl) +GEN_VEXT_VX_RM(vwsmaccsu_vx_w, 4, 8, clearq) + +static inline int16_t +vwsmaccus8(CPURISCVState *env, int vxrm, int8_t a, uint8_t b, int16_t c) +{ + uint8_t round; + int16_t res = (int16_t)a * b; + + round = get_round(vxrm, res, 4); + res = (res >> 4) + round; + return ssub16(env, vxrm, c, res); +} + +static inline int32_t +vwsmaccus16(CPURISCVState *env, int vxrm, int16_t a, uint16_t b, int32_t c) +{ + uint8_t round; + int32_t res = (int32_t)a * b; + + round = get_round(vxrm, res, 8); + res = (res >> 8) + round; + return ssub32(env, vxrm, c, res); +} + +static inline int64_t +vwsmaccus32(CPURISCVState *env, int vxrm, int32_t a, uint32_t b, int64_t c) +{ + uint8_t round; + int64_t res = (int64_t)a * b; + + round = get_round(vxrm, res, 16); + res = (res >> 16) + round; + return ssub64(env, vxrm, c, res); +} + +RVVCALL(OPIVX3_RM, vwsmaccus_vx_b, 
WOP_SUS_B, H2, H1, vwsmaccus8) +RVVCALL(OPIVX3_RM, vwsmaccus_vx_h, WOP_SUS_H, H4, H2, vwsmaccus16) +RVVCALL(OPIVX3_RM, vwsmaccus_vx_w, WOP_SUS_W, H8, H4, vwsmaccus32) +GEN_VEXT_VX_RM(vwsmaccus_vx_b, 1, 2, clearh) +GEN_VEXT_VX_RM(vwsmaccus_vx_h, 2, 4, clearl) +GEN_VEXT_VX_RM(vwsmaccus_vx_w, 4, 8, clearq) + +/* Vector Single-Width Scaling Shift Instructions */ +static inline uint8_t +vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b) +{ + uint8_t round, shift = b & 0x7; + uint8_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + return res; +} +static inline uint16_t +vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b) +{ + uint8_t round, shift = b & 0xf; + uint16_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + return res; +} +static inline uint32_t +vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b) +{ + uint8_t round, shift = b & 0x1f; + uint32_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + return res; +} +static inline uint64_t +vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b) +{ + uint8_t round, shift = b & 0x3f; + uint64_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + return res; +} +RVVCALL(OPIVV2_RM, vssrl_vv_b, OP_UUU_B, H1, H1, H1, vssrl8) +RVVCALL(OPIVV2_RM, vssrl_vv_h, OP_UUU_H, H2, H2, H2, vssrl16) +RVVCALL(OPIVV2_RM, vssrl_vv_w, OP_UUU_W, H4, H4, H4, vssrl32) +RVVCALL(OPIVV2_RM, vssrl_vv_d, OP_UUU_D, H8, H8, H8, vssrl64) +GEN_VEXT_VV_RM(vssrl_vv_b, 1, 1, clearb) +GEN_VEXT_VV_RM(vssrl_vv_h, 2, 2, clearh) +GEN_VEXT_VV_RM(vssrl_vv_w, 4, 4, clearl) +GEN_VEXT_VV_RM(vssrl_vv_d, 8, 8, clearq) + +RVVCALL(OPIVX2_RM, vssrl_vx_b, OP_UUU_B, H1, H1, vssrl8) +RVVCALL(OPIVX2_RM, vssrl_vx_h, OP_UUU_H, H2, H2, vssrl16) +RVVCALL(OPIVX2_RM, vssrl_vx_w, OP_UUU_W, H4, H4, vssrl32) +RVVCALL(OPIVX2_RM, vssrl_vx_d, OP_UUU_D, H8, H8, vssrl64) +GEN_VEXT_VX_RM(vssrl_vx_b, 1, 1, clearb) +GEN_VEXT_VX_RM(vssrl_vx_h, 2, 2, clearh) +GEN_VEXT_VX_RM(vssrl_vx_w, 4, 4, clearl) +GEN_VEXT_VX_RM(vssrl_vx_d, 8, 8, clearq) + +static inline int8_t +vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b) +{ + uint8_t round, shift = b & 0x7; + int8_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + return res; +} +static inline int16_t +vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b) +{ + uint8_t round, shift = b & 0xf; + int16_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + return res; +} +static inline int32_t +vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b) +{ + uint8_t round, shift = b & 0x1f; + int32_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + return res; +} +static inline int64_t +vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b) +{ + uint8_t round, shift = b & 0x3f; + int64_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + return res; +} + +RVVCALL(OPIVV2_RM, vssra_vv_b, OP_SSS_B, H1, H1, H1, vssra8) +RVVCALL(OPIVV2_RM, vssra_vv_h, OP_SSS_H, H2, H2, H2, vssra16) +RVVCALL(OPIVV2_RM, vssra_vv_w, OP_SSS_W, H4, H4, H4, vssra32) +RVVCALL(OPIVV2_RM, vssra_vv_d, OP_SSS_D, H8, H8, H8, vssra64) +GEN_VEXT_VV_RM(vssra_vv_b, 1, 1, clearb) +GEN_VEXT_VV_RM(vssra_vv_h, 2, 2, clearh) +GEN_VEXT_VV_RM(vssra_vv_w, 4, 4, clearl) +GEN_VEXT_VV_RM(vssra_vv_d, 8, 8, clearq) + +RVVCALL(OPIVX2_RM, vssra_vx_b, OP_SSS_B, H1, H1, vssra8) +RVVCALL(OPIVX2_RM, vssra_vx_h, OP_SSS_H, H2, H2, vssra16) +RVVCALL(OPIVX2_RM, vssra_vx_w, OP_SSS_W, 
H4, H4, vssra32) +RVVCALL(OPIVX2_RM, vssra_vx_d, OP_SSS_D, H8, H8, vssra64) +GEN_VEXT_VX_RM(vssra_vx_b, 1, 1, clearb) +GEN_VEXT_VX_RM(vssra_vx_h, 2, 2, clearh) +GEN_VEXT_VX_RM(vssra_vx_w, 4, 4, clearl) +GEN_VEXT_VX_RM(vssra_vx_d, 8, 8, clearq) + +/* Vector Narrowing Fixed-Point Clip Instructions */ +static inline int8_t +vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b) +{ + uint8_t round, shift = b & 0xf; + int16_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + if (res > INT8_MAX) { + env->vxsat = 0x1; + return INT8_MAX; + } else if (res < INT8_MIN) { + env->vxsat = 0x1; + return INT8_MIN; + } else { + return res; + } +} + +static inline int16_t +vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b) +{ + uint8_t round, shift = b & 0x1f; + int32_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + if (res > INT16_MAX) { + env->vxsat = 0x1; + return INT16_MAX; + } else if (res < INT16_MIN) { + env->vxsat = 0x1; + return INT16_MIN; + } else { + return res; + } +} + +static inline int32_t +vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b) +{ + uint8_t round, shift = b & 0x3f; + int64_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + if (res > INT32_MAX) { + env->vxsat = 0x1; + return INT32_MAX; + } else if (res < INT32_MIN) { + env->vxsat = 0x1; + return INT32_MIN; + } else { + return res; + } +} + +RVVCALL(OPIVV2_RM, vnclip_vv_b, NOP_SSS_B, H1, H2, H1, vnclip8) +RVVCALL(OPIVV2_RM, vnclip_vv_h, NOP_SSS_H, H2, H4, H2, vnclip16) +RVVCALL(OPIVV2_RM, vnclip_vv_w, NOP_SSS_W, H4, H8, H4, vnclip32) +GEN_VEXT_VV_RM(vnclip_vv_b, 1, 1, clearb) +GEN_VEXT_VV_RM(vnclip_vv_h, 2, 2, clearh) +GEN_VEXT_VV_RM(vnclip_vv_w, 4, 4, clearl) + +RVVCALL(OPIVX2_RM, vnclip_vx_b, NOP_SSS_B, H1, H2, vnclip8) +RVVCALL(OPIVX2_RM, vnclip_vx_h, NOP_SSS_H, H2, H4, vnclip16) +RVVCALL(OPIVX2_RM, vnclip_vx_w, NOP_SSS_W, H4, H8, vnclip32) +GEN_VEXT_VX_RM(vnclip_vx_b, 1, 1, clearb) +GEN_VEXT_VX_RM(vnclip_vx_h, 2, 2, clearh) +GEN_VEXT_VX_RM(vnclip_vx_w, 4, 4, clearl) + +static inline uint8_t +vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b) +{ + uint8_t round, shift = b & 0xf; + uint16_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + if (res > UINT8_MAX) { + env->vxsat = 0x1; + return UINT8_MAX; + } else { + return res; + } +} + +static inline uint16_t +vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b) +{ + uint8_t round, shift = b & 0x1f; + uint32_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + if (res > UINT16_MAX) { + env->vxsat = 0x1; + return UINT16_MAX; + } else { + return res; + } +} + +static inline uint32_t +vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b) +{ + uint8_t round, shift = b & 0x3f; + int64_t res; + + round = get_round(vxrm, a, shift); + res = (a >> shift) + round; + if (res > UINT32_MAX) { + env->vxsat = 0x1; + return UINT32_MAX; + } else { + return res; + } +} + +RVVCALL(OPIVV2_RM, vnclipu_vv_b, NOP_UUU_B, H1, H2, H1, vnclipu8) +RVVCALL(OPIVV2_RM, vnclipu_vv_h, NOP_UUU_H, H2, H4, H2, vnclipu16) +RVVCALL(OPIVV2_RM, vnclipu_vv_w, NOP_UUU_W, H4, H8, H4, vnclipu32) +GEN_VEXT_VV_RM(vnclipu_vv_b, 1, 1, clearb) +GEN_VEXT_VV_RM(vnclipu_vv_h, 2, 2, clearh) +GEN_VEXT_VV_RM(vnclipu_vv_w, 4, 4, clearl) + +RVVCALL(OPIVX2_RM, vnclipu_vx_b, NOP_UUU_B, H1, H2, vnclipu8) +RVVCALL(OPIVX2_RM, vnclipu_vx_h, NOP_UUU_H, H2, H4, vnclipu16) +RVVCALL(OPIVX2_RM, vnclipu_vx_w, NOP_UUU_W, H4, H8, vnclipu32) 
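/*
 * Editorial illustration, not part of this patch: the scaling shifts
 * (vssrl/vssra) and narrowing clips (vnclip/vnclipu) above all derive
 * their rounding increment from get_round(). The hypothetical helper
 * below is a minimal, self-contained restatement of that rule, assuming
 * the RVV vxrm encodings 0=rnu, 1=rne, 2=rdn, 3=rod; it only makes the
 * arithmetic easy to check by hand and mirrors, rather than replaces,
 * the extract64()-based code above.
 */
static inline uint64_t demo_round_shift(uint64_t v, unsigned shift, int vxrm)
{
    uint64_t lsb = (v >> shift) & 1;                      /* LSB of truncated result */
    uint64_t guard = shift ? (v >> (shift - 1)) & 1 : 0;  /* first discarded bit */
    uint64_t sticky = shift > 1 ?
        ((v & ((1ULL << (shift - 1)) - 1)) != 0) : 0;     /* OR of the remaining discarded bits */
    uint64_t inc;

    switch (vxrm) {
    case 0:  inc = guard;                   break;  /* round-to-nearest-up   */
    case 1:  inc = guard & (sticky | lsb);  break;  /* round-to-nearest-even */
    case 3:  inc = !lsb & (guard | sticky); break;  /* round-to-odd (jam)    */
    default: inc = 0;                       break;  /* round-down (truncate) */
    }
    return (v >> shift) + inc;
}
/*
 * For example, shifting 0b1011 (2.75 in Q2) right by 2 gives
 * rnu -> 3, rne -> 3, rdn -> 2, rod -> 3; shifting 0b1010 (2.5)
 * gives rnu -> 3, rne -> 2 (tie to even), rod -> 3.
 */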
+GEN_VEXT_VX_RM(vnclipu_vx_b, 1, 1, clearb) +GEN_VEXT_VX_RM(vnclipu_vx_h, 2, 2, clearh) +GEN_VEXT_VX_RM(vnclipu_vx_w, 4, 4, clearl) + +/* + *** Vector Float Point Arithmetic Instructions + */ +/* Vector Single-Width Floating-Point Add/Subtract Instructions */ +#define OPFVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ +static void do_##NAME(void *vd, void *vs1, void *vs2, int i, \ + CPURISCVState *env) \ +{ \ + TX1 s1 = *((T1 *)vs1 + HS1(i)); \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + *((TD *)vd + HD(i)) = OP(s2, s1, &env->fp_status); \ +} + +#define GEN_VEXT_VV_ENV(NAME, ESZ, DSZ, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, void *vs1, \ + void *vs2, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + uint32_t vlmax = vext_maxsz(desc) / ESZ; \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + do_##NAME(vd, vs1, vs2, i, env); \ + } \ + CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \ +} + +RVVCALL(OPFVV2, vfadd_vv_h, OP_UUU_H, H2, H2, H2, float16_add) +RVVCALL(OPFVV2, vfadd_vv_w, OP_UUU_W, H4, H4, H4, float32_add) +RVVCALL(OPFVV2, vfadd_vv_d, OP_UUU_D, H8, H8, H8, float64_add) +GEN_VEXT_VV_ENV(vfadd_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfadd_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfadd_vv_d, 8, 8, clearq) + +#define OPFVF2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ +static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \ + CPURISCVState *env) \ +{ \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, &env->fp_status);\ +} + +#define GEN_VEXT_VF(NAME, ESZ, DSZ, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \ + void *vs2, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + uint32_t vlmax = vext_maxsz(desc) / ESZ; \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + do_##NAME(vd, s1, vs2, i, env); \ + } \ + CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \ +} + +RVVCALL(OPFVF2, vfadd_vf_h, OP_UUU_H, H2, H2, float16_add) +RVVCALL(OPFVF2, vfadd_vf_w, OP_UUU_W, H4, H4, float32_add) +RVVCALL(OPFVF2, vfadd_vf_d, OP_UUU_D, H8, H8, float64_add) +GEN_VEXT_VF(vfadd_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfadd_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfadd_vf_d, 8, 8, clearq) + +RVVCALL(OPFVV2, vfsub_vv_h, OP_UUU_H, H2, H2, H2, float16_sub) +RVVCALL(OPFVV2, vfsub_vv_w, OP_UUU_W, H4, H4, H4, float32_sub) +RVVCALL(OPFVV2, vfsub_vv_d, OP_UUU_D, H8, H8, H8, float64_sub) +GEN_VEXT_VV_ENV(vfsub_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfsub_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfsub_vv_d, 8, 8, clearq) +RVVCALL(OPFVF2, vfsub_vf_h, OP_UUU_H, H2, H2, float16_sub) +RVVCALL(OPFVF2, vfsub_vf_w, OP_UUU_W, H4, H4, float32_sub) +RVVCALL(OPFVF2, vfsub_vf_d, OP_UUU_D, H8, H8, float64_sub) +GEN_VEXT_VF(vfsub_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfsub_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfsub_vf_d, 8, 8, clearq) + +static uint16_t float16_rsub(uint16_t a, uint16_t b, float_status *s) +{ + return float16_sub(b, a, s); +} + +static uint32_t float32_rsub(uint32_t a, uint32_t b, float_status *s) +{ + return float32_sub(b, a, s); +} + +static uint64_t float64_rsub(uint64_t a, uint64_t b, float_status *s) +{ + return float64_sub(b, a, s); +} + +RVVCALL(OPFVF2, vfrsub_vf_h, OP_UUU_H, H2, H2, float16_rsub) +RVVCALL(OPFVF2, vfrsub_vf_w, OP_UUU_W, H4, H4, float32_rsub) +RVVCALL(OPFVF2, 
vfrsub_vf_d, OP_UUU_D, H8, H8, float64_rsub) +GEN_VEXT_VF(vfrsub_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfrsub_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfrsub_vf_d, 8, 8, clearq) + +/* Vector Widening Floating-Point Add/Subtract Instructions */ +static uint32_t vfwadd16(uint16_t a, uint16_t b, float_status *s) +{ + return float32_add(float16_to_float32(a, true, s), + float16_to_float32(b, true, s), s); +} + +static uint64_t vfwadd32(uint32_t a, uint32_t b, float_status *s) +{ + return float64_add(float32_to_float64(a, s), + float32_to_float64(b, s), s); + +} + +RVVCALL(OPFVV2, vfwadd_vv_h, WOP_UUU_H, H4, H2, H2, vfwadd16) +RVVCALL(OPFVV2, vfwadd_vv_w, WOP_UUU_W, H8, H4, H4, vfwadd32) +GEN_VEXT_VV_ENV(vfwadd_vv_h, 2, 4, clearl) +GEN_VEXT_VV_ENV(vfwadd_vv_w, 4, 8, clearq) +RVVCALL(OPFVF2, vfwadd_vf_h, WOP_UUU_H, H4, H2, vfwadd16) +RVVCALL(OPFVF2, vfwadd_vf_w, WOP_UUU_W, H8, H4, vfwadd32) +GEN_VEXT_VF(vfwadd_vf_h, 2, 4, clearl) +GEN_VEXT_VF(vfwadd_vf_w, 4, 8, clearq) + +static uint32_t vfwsub16(uint16_t a, uint16_t b, float_status *s) +{ + return float32_sub(float16_to_float32(a, true, s), + float16_to_float32(b, true, s), s); +} + +static uint64_t vfwsub32(uint32_t a, uint32_t b, float_status *s) +{ + return float64_sub(float32_to_float64(a, s), + float32_to_float64(b, s), s); + +} + +RVVCALL(OPFVV2, vfwsub_vv_h, WOP_UUU_H, H4, H2, H2, vfwsub16) +RVVCALL(OPFVV2, vfwsub_vv_w, WOP_UUU_W, H8, H4, H4, vfwsub32) +GEN_VEXT_VV_ENV(vfwsub_vv_h, 2, 4, clearl) +GEN_VEXT_VV_ENV(vfwsub_vv_w, 4, 8, clearq) +RVVCALL(OPFVF2, vfwsub_vf_h, WOP_UUU_H, H4, H2, vfwsub16) +RVVCALL(OPFVF2, vfwsub_vf_w, WOP_UUU_W, H8, H4, vfwsub32) +GEN_VEXT_VF(vfwsub_vf_h, 2, 4, clearl) +GEN_VEXT_VF(vfwsub_vf_w, 4, 8, clearq) + +static uint32_t vfwaddw16(uint32_t a, uint16_t b, float_status *s) +{ + return float32_add(a, float16_to_float32(b, true, s), s); +} + +static uint64_t vfwaddw32(uint64_t a, uint32_t b, float_status *s) +{ + return float64_add(a, float32_to_float64(b, s), s); +} + +RVVCALL(OPFVV2, vfwadd_wv_h, WOP_WUUU_H, H4, H2, H2, vfwaddw16) +RVVCALL(OPFVV2, vfwadd_wv_w, WOP_WUUU_W, H8, H4, H4, vfwaddw32) +GEN_VEXT_VV_ENV(vfwadd_wv_h, 2, 4, clearl) +GEN_VEXT_VV_ENV(vfwadd_wv_w, 4, 8, clearq) +RVVCALL(OPFVF2, vfwadd_wf_h, WOP_WUUU_H, H4, H2, vfwaddw16) +RVVCALL(OPFVF2, vfwadd_wf_w, WOP_WUUU_W, H8, H4, vfwaddw32) +GEN_VEXT_VF(vfwadd_wf_h, 2, 4, clearl) +GEN_VEXT_VF(vfwadd_wf_w, 4, 8, clearq) + +static uint32_t vfwsubw16(uint32_t a, uint16_t b, float_status *s) +{ + return float32_sub(a, float16_to_float32(b, true, s), s); +} + +static uint64_t vfwsubw32(uint64_t a, uint32_t b, float_status *s) +{ + return float64_sub(a, float32_to_float64(b, s), s); +} + +RVVCALL(OPFVV2, vfwsub_wv_h, WOP_WUUU_H, H4, H2, H2, vfwsubw16) +RVVCALL(OPFVV2, vfwsub_wv_w, WOP_WUUU_W, H8, H4, H4, vfwsubw32) +GEN_VEXT_VV_ENV(vfwsub_wv_h, 2, 4, clearl) +GEN_VEXT_VV_ENV(vfwsub_wv_w, 4, 8, clearq) +RVVCALL(OPFVF2, vfwsub_wf_h, WOP_WUUU_H, H4, H2, vfwsubw16) +RVVCALL(OPFVF2, vfwsub_wf_w, WOP_WUUU_W, H8, H4, vfwsubw32) +GEN_VEXT_VF(vfwsub_wf_h, 2, 4, clearl) +GEN_VEXT_VF(vfwsub_wf_w, 4, 8, clearq) + +/* Vector Single-Width Floating-Point Multiply/Divide Instructions */ +RVVCALL(OPFVV2, vfmul_vv_h, OP_UUU_H, H2, H2, H2, float16_mul) +RVVCALL(OPFVV2, vfmul_vv_w, OP_UUU_W, H4, H4, H4, float32_mul) +RVVCALL(OPFVV2, vfmul_vv_d, OP_UUU_D, H8, H8, H8, float64_mul) +GEN_VEXT_VV_ENV(vfmul_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfmul_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfmul_vv_d, 8, 8, clearq) +RVVCALL(OPFVF2, vfmul_vf_h, OP_UUU_H, H2, H2, float16_mul) +RVVCALL(OPFVF2, 
vfmul_vf_w, OP_UUU_W, H4, H4, float32_mul) +RVVCALL(OPFVF2, vfmul_vf_d, OP_UUU_D, H8, H8, float64_mul) +GEN_VEXT_VF(vfmul_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfmul_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfmul_vf_d, 8, 8, clearq) + +RVVCALL(OPFVV2, vfdiv_vv_h, OP_UUU_H, H2, H2, H2, float16_div) +RVVCALL(OPFVV2, vfdiv_vv_w, OP_UUU_W, H4, H4, H4, float32_div) +RVVCALL(OPFVV2, vfdiv_vv_d, OP_UUU_D, H8, H8, H8, float64_div) +GEN_VEXT_VV_ENV(vfdiv_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfdiv_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfdiv_vv_d, 8, 8, clearq) +RVVCALL(OPFVF2, vfdiv_vf_h, OP_UUU_H, H2, H2, float16_div) +RVVCALL(OPFVF2, vfdiv_vf_w, OP_UUU_W, H4, H4, float32_div) +RVVCALL(OPFVF2, vfdiv_vf_d, OP_UUU_D, H8, H8, float64_div) +GEN_VEXT_VF(vfdiv_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfdiv_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfdiv_vf_d, 8, 8, clearq) + +static uint16_t float16_rdiv(uint16_t a, uint16_t b, float_status *s) +{ + return float16_div(b, a, s); +} + +static uint32_t float32_rdiv(uint32_t a, uint32_t b, float_status *s) +{ + return float32_div(b, a, s); +} + +static uint64_t float64_rdiv(uint64_t a, uint64_t b, float_status *s) +{ + return float64_div(b, a, s); +} + +RVVCALL(OPFVF2, vfrdiv_vf_h, OP_UUU_H, H2, H2, float16_rdiv) +RVVCALL(OPFVF2, vfrdiv_vf_w, OP_UUU_W, H4, H4, float32_rdiv) +RVVCALL(OPFVF2, vfrdiv_vf_d, OP_UUU_D, H8, H8, float64_rdiv) +GEN_VEXT_VF(vfrdiv_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfrdiv_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfrdiv_vf_d, 8, 8, clearq) + +/* Vector Widening Floating-Point Multiply */ +static uint32_t vfwmul16(uint16_t a, uint16_t b, float_status *s) +{ + return float32_mul(float16_to_float32(a, true, s), + float16_to_float32(b, true, s), s); +} + +static uint64_t vfwmul32(uint32_t a, uint32_t b, float_status *s) +{ + return float64_mul(float32_to_float64(a, s), + float32_to_float64(b, s), s); + +} +RVVCALL(OPFVV2, vfwmul_vv_h, WOP_UUU_H, H4, H2, H2, vfwmul16) +RVVCALL(OPFVV2, vfwmul_vv_w, WOP_UUU_W, H8, H4, H4, vfwmul32) +GEN_VEXT_VV_ENV(vfwmul_vv_h, 2, 4, clearl) +GEN_VEXT_VV_ENV(vfwmul_vv_w, 4, 8, clearq) +RVVCALL(OPFVF2, vfwmul_vf_h, WOP_UUU_H, H4, H2, vfwmul16) +RVVCALL(OPFVF2, vfwmul_vf_w, WOP_UUU_W, H8, H4, vfwmul32) +GEN_VEXT_VF(vfwmul_vf_h, 2, 4, clearl) +GEN_VEXT_VF(vfwmul_vf_w, 4, 8, clearq) + +/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */ +#define OPFVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \ +static void do_##NAME(void *vd, void *vs1, void *vs2, int i, \ + CPURISCVState *env) \ +{ \ + TX1 s1 = *((T1 *)vs1 + HS1(i)); \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + TD d = *((TD *)vd + HD(i)); \ + *((TD *)vd + HD(i)) = OP(s2, s1, d, &env->fp_status); \ +} + +static uint16_t fmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s) +{ + return float16_muladd(a, b, d, 0, s); +} + +static uint32_t fmacc32(uint32_t a, uint32_t b, uint32_t d, float_status *s) +{ + return float32_muladd(a, b, d, 0, s); +} + +static uint64_t fmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s) +{ + return float64_muladd(a, b, d, 0, s); +} + +RVVCALL(OPFVV3, vfmacc_vv_h, OP_UUU_H, H2, H2, H2, fmacc16) +RVVCALL(OPFVV3, vfmacc_vv_w, OP_UUU_W, H4, H4, H4, fmacc32) +RVVCALL(OPFVV3, vfmacc_vv_d, OP_UUU_D, H8, H8, H8, fmacc64) +GEN_VEXT_VV_ENV(vfmacc_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfmacc_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfmacc_vv_d, 8, 8, clearq) + +#define OPFVF3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \ +static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \ + CPURISCVState *env) \ +{ \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + TD d = 
*((TD *)vd + HD(i)); \ + *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, d, &env->fp_status);\ +} + +RVVCALL(OPFVF3, vfmacc_vf_h, OP_UUU_H, H2, H2, fmacc16) +RVVCALL(OPFVF3, vfmacc_vf_w, OP_UUU_W, H4, H4, fmacc32) +RVVCALL(OPFVF3, vfmacc_vf_d, OP_UUU_D, H8, H8, fmacc64) +GEN_VEXT_VF(vfmacc_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfmacc_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfmacc_vf_d, 8, 8, clearq) + +static uint16_t fnmacc16(uint16_t a, uint16_t b, uint16_t d, float_status *s) +{ + return float16_muladd(a, b, d, + float_muladd_negate_c | float_muladd_negate_product, s); +} + +static uint32_t fnmacc32(uint32_t a, uint32_t b, uint32_t d, float_status *s) +{ + return float32_muladd(a, b, d, + float_muladd_negate_c | float_muladd_negate_product, s); +} + +static uint64_t fnmacc64(uint64_t a, uint64_t b, uint64_t d, float_status *s) +{ + return float64_muladd(a, b, d, + float_muladd_negate_c | float_muladd_negate_product, s); +} + +RVVCALL(OPFVV3, vfnmacc_vv_h, OP_UUU_H, H2, H2, H2, fnmacc16) +RVVCALL(OPFVV3, vfnmacc_vv_w, OP_UUU_W, H4, H4, H4, fnmacc32) +RVVCALL(OPFVV3, vfnmacc_vv_d, OP_UUU_D, H8, H8, H8, fnmacc64) +GEN_VEXT_VV_ENV(vfnmacc_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfnmacc_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfnmacc_vv_d, 8, 8, clearq) +RVVCALL(OPFVF3, vfnmacc_vf_h, OP_UUU_H, H2, H2, fnmacc16) +RVVCALL(OPFVF3, vfnmacc_vf_w, OP_UUU_W, H4, H4, fnmacc32) +RVVCALL(OPFVF3, vfnmacc_vf_d, OP_UUU_D, H8, H8, fnmacc64) +GEN_VEXT_VF(vfnmacc_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfnmacc_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfnmacc_vf_d, 8, 8, clearq) + +static uint16_t fmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s) +{ + return float16_muladd(a, b, d, float_muladd_negate_c, s); +} + +static uint32_t fmsac32(uint32_t a, uint32_t b, uint32_t d, float_status *s) +{ + return float32_muladd(a, b, d, float_muladd_negate_c, s); +} + +static uint64_t fmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s) +{ + return float64_muladd(a, b, d, float_muladd_negate_c, s); +} + +RVVCALL(OPFVV3, vfmsac_vv_h, OP_UUU_H, H2, H2, H2, fmsac16) +RVVCALL(OPFVV3, vfmsac_vv_w, OP_UUU_W, H4, H4, H4, fmsac32) +RVVCALL(OPFVV3, vfmsac_vv_d, OP_UUU_D, H8, H8, H8, fmsac64) +GEN_VEXT_VV_ENV(vfmsac_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfmsac_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfmsac_vv_d, 8, 8, clearq) +RVVCALL(OPFVF3, vfmsac_vf_h, OP_UUU_H, H2, H2, fmsac16) +RVVCALL(OPFVF3, vfmsac_vf_w, OP_UUU_W, H4, H4, fmsac32) +RVVCALL(OPFVF3, vfmsac_vf_d, OP_UUU_D, H8, H8, fmsac64) +GEN_VEXT_VF(vfmsac_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfmsac_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfmsac_vf_d, 8, 8, clearq) + +static uint16_t fnmsac16(uint16_t a, uint16_t b, uint16_t d, float_status *s) +{ + return float16_muladd(a, b, d, float_muladd_negate_product, s); +} + +static uint32_t fnmsac32(uint32_t a, uint32_t b, uint32_t d, float_status *s) +{ + return float32_muladd(a, b, d, float_muladd_negate_product, s); +} + +static uint64_t fnmsac64(uint64_t a, uint64_t b, uint64_t d, float_status *s) +{ + return float64_muladd(a, b, d, float_muladd_negate_product, s); +} + +RVVCALL(OPFVV3, vfnmsac_vv_h, OP_UUU_H, H2, H2, H2, fnmsac16) +RVVCALL(OPFVV3, vfnmsac_vv_w, OP_UUU_W, H4, H4, H4, fnmsac32) +RVVCALL(OPFVV3, vfnmsac_vv_d, OP_UUU_D, H8, H8, H8, fnmsac64) +GEN_VEXT_VV_ENV(vfnmsac_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfnmsac_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfnmsac_vv_d, 8, 8, clearq) +RVVCALL(OPFVF3, vfnmsac_vf_h, OP_UUU_H, H2, H2, fnmsac16) +RVVCALL(OPFVF3, vfnmsac_vf_w, OP_UUU_W, H4, H4, fnmsac32) +RVVCALL(OPFVF3, vfnmsac_vf_d, OP_UUU_D, H8, H8, 
fnmsac64) +GEN_VEXT_VF(vfnmsac_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfnmsac_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfnmsac_vf_d, 8, 8, clearq) + +static uint16_t fmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s) +{ + return float16_muladd(d, b, a, 0, s); +} + +static uint32_t fmadd32(uint32_t a, uint32_t b, uint32_t d, float_status *s) +{ + return float32_muladd(d, b, a, 0, s); +} + +static uint64_t fmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s) +{ + return float64_muladd(d, b, a, 0, s); +} + +RVVCALL(OPFVV3, vfmadd_vv_h, OP_UUU_H, H2, H2, H2, fmadd16) +RVVCALL(OPFVV3, vfmadd_vv_w, OP_UUU_W, H4, H4, H4, fmadd32) +RVVCALL(OPFVV3, vfmadd_vv_d, OP_UUU_D, H8, H8, H8, fmadd64) +GEN_VEXT_VV_ENV(vfmadd_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfmadd_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfmadd_vv_d, 8, 8, clearq) +RVVCALL(OPFVF3, vfmadd_vf_h, OP_UUU_H, H2, H2, fmadd16) +RVVCALL(OPFVF3, vfmadd_vf_w, OP_UUU_W, H4, H4, fmadd32) +RVVCALL(OPFVF3, vfmadd_vf_d, OP_UUU_D, H8, H8, fmadd64) +GEN_VEXT_VF(vfmadd_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfmadd_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfmadd_vf_d, 8, 8, clearq) + +static uint16_t fnmadd16(uint16_t a, uint16_t b, uint16_t d, float_status *s) +{ + return float16_muladd(d, b, a, + float_muladd_negate_c | float_muladd_negate_product, s); +} + +static uint32_t fnmadd32(uint32_t a, uint32_t b, uint32_t d, float_status *s) +{ + return float32_muladd(d, b, a, + float_muladd_negate_c | float_muladd_negate_product, s); +} + +static uint64_t fnmadd64(uint64_t a, uint64_t b, uint64_t d, float_status *s) +{ + return float64_muladd(d, b, a, + float_muladd_negate_c | float_muladd_negate_product, s); +} + +RVVCALL(OPFVV3, vfnmadd_vv_h, OP_UUU_H, H2, H2, H2, fnmadd16) +RVVCALL(OPFVV3, vfnmadd_vv_w, OP_UUU_W, H4, H4, H4, fnmadd32) +RVVCALL(OPFVV3, vfnmadd_vv_d, OP_UUU_D, H8, H8, H8, fnmadd64) +GEN_VEXT_VV_ENV(vfnmadd_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfnmadd_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfnmadd_vv_d, 8, 8, clearq) +RVVCALL(OPFVF3, vfnmadd_vf_h, OP_UUU_H, H2, H2, fnmadd16) +RVVCALL(OPFVF3, vfnmadd_vf_w, OP_UUU_W, H4, H4, fnmadd32) +RVVCALL(OPFVF3, vfnmadd_vf_d, OP_UUU_D, H8, H8, fnmadd64) +GEN_VEXT_VF(vfnmadd_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfnmadd_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfnmadd_vf_d, 8, 8, clearq) + +static uint16_t fmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s) +{ + return float16_muladd(d, b, a, float_muladd_negate_c, s); +} + +static uint32_t fmsub32(uint32_t a, uint32_t b, uint32_t d, float_status *s) +{ + return float32_muladd(d, b, a, float_muladd_negate_c, s); +} + +static uint64_t fmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s) +{ + return float64_muladd(d, b, a, float_muladd_negate_c, s); +} + +RVVCALL(OPFVV3, vfmsub_vv_h, OP_UUU_H, H2, H2, H2, fmsub16) +RVVCALL(OPFVV3, vfmsub_vv_w, OP_UUU_W, H4, H4, H4, fmsub32) +RVVCALL(OPFVV3, vfmsub_vv_d, OP_UUU_D, H8, H8, H8, fmsub64) +GEN_VEXT_VV_ENV(vfmsub_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfmsub_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfmsub_vv_d, 8, 8, clearq) +RVVCALL(OPFVF3, vfmsub_vf_h, OP_UUU_H, H2, H2, fmsub16) +RVVCALL(OPFVF3, vfmsub_vf_w, OP_UUU_W, H4, H4, fmsub32) +RVVCALL(OPFVF3, vfmsub_vf_d, OP_UUU_D, H8, H8, fmsub64) +GEN_VEXT_VF(vfmsub_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfmsub_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfmsub_vf_d, 8, 8, clearq) + +static uint16_t fnmsub16(uint16_t a, uint16_t b, uint16_t d, float_status *s) +{ + return float16_muladd(d, b, a, float_muladd_negate_product, s); +} + +static uint32_t fnmsub32(uint32_t a, uint32_t b, uint32_t d, 
float_status *s) +{ + return float32_muladd(d, b, a, float_muladd_negate_product, s); +} + +static uint64_t fnmsub64(uint64_t a, uint64_t b, uint64_t d, float_status *s) +{ + return float64_muladd(d, b, a, float_muladd_negate_product, s); +} + +RVVCALL(OPFVV3, vfnmsub_vv_h, OP_UUU_H, H2, H2, H2, fnmsub16) +RVVCALL(OPFVV3, vfnmsub_vv_w, OP_UUU_W, H4, H4, H4, fnmsub32) +RVVCALL(OPFVV3, vfnmsub_vv_d, OP_UUU_D, H8, H8, H8, fnmsub64) +GEN_VEXT_VV_ENV(vfnmsub_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfnmsub_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfnmsub_vv_d, 8, 8, clearq) +RVVCALL(OPFVF3, vfnmsub_vf_h, OP_UUU_H, H2, H2, fnmsub16) +RVVCALL(OPFVF3, vfnmsub_vf_w, OP_UUU_W, H4, H4, fnmsub32) +RVVCALL(OPFVF3, vfnmsub_vf_d, OP_UUU_D, H8, H8, fnmsub64) +GEN_VEXT_VF(vfnmsub_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfnmsub_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfnmsub_vf_d, 8, 8, clearq) + +/* Vector Widening Floating-Point Fused Multiply-Add Instructions */ +static uint32_t fwmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s) +{ + return float32_muladd(float16_to_float32(a, true, s), + float16_to_float32(b, true, s), d, 0, s); +} + +static uint64_t fwmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s) +{ + return float64_muladd(float32_to_float64(a, s), + float32_to_float64(b, s), d, 0, s); +} + +RVVCALL(OPFVV3, vfwmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwmacc16) +RVVCALL(OPFVV3, vfwmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwmacc32) +GEN_VEXT_VV_ENV(vfwmacc_vv_h, 2, 4, clearl) +GEN_VEXT_VV_ENV(vfwmacc_vv_w, 4, 8, clearq) +RVVCALL(OPFVF3, vfwmacc_vf_h, WOP_UUU_H, H4, H2, fwmacc16) +RVVCALL(OPFVF3, vfwmacc_vf_w, WOP_UUU_W, H8, H4, fwmacc32) +GEN_VEXT_VF(vfwmacc_vf_h, 2, 4, clearl) +GEN_VEXT_VF(vfwmacc_vf_w, 4, 8, clearq) + +static uint32_t fwnmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s) +{ + return float32_muladd(float16_to_float32(a, true, s), + float16_to_float32(b, true, s), d, + float_muladd_negate_c | float_muladd_negate_product, s); +} + +static uint64_t fwnmacc32(uint32_t a, uint32_t b, uint64_t d, float_status *s) +{ + return float64_muladd(float32_to_float64(a, s), + float32_to_float64(b, s), d, + float_muladd_negate_c | float_muladd_negate_product, s); +} + +RVVCALL(OPFVV3, vfwnmacc_vv_h, WOP_UUU_H, H4, H2, H2, fwnmacc16) +RVVCALL(OPFVV3, vfwnmacc_vv_w, WOP_UUU_W, H8, H4, H4, fwnmacc32) +GEN_VEXT_VV_ENV(vfwnmacc_vv_h, 2, 4, clearl) +GEN_VEXT_VV_ENV(vfwnmacc_vv_w, 4, 8, clearq) +RVVCALL(OPFVF3, vfwnmacc_vf_h, WOP_UUU_H, H4, H2, fwnmacc16) +RVVCALL(OPFVF3, vfwnmacc_vf_w, WOP_UUU_W, H8, H4, fwnmacc32) +GEN_VEXT_VF(vfwnmacc_vf_h, 2, 4, clearl) +GEN_VEXT_VF(vfwnmacc_vf_w, 4, 8, clearq) + +static uint32_t fwmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s) +{ + return float32_muladd(float16_to_float32(a, true, s), + float16_to_float32(b, true, s), d, + float_muladd_negate_c, s); +} + +static uint64_t fwmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s) +{ + return float64_muladd(float32_to_float64(a, s), + float32_to_float64(b, s), d, + float_muladd_negate_c, s); +} + +RVVCALL(OPFVV3, vfwmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwmsac16) +RVVCALL(OPFVV3, vfwmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwmsac32) +GEN_VEXT_VV_ENV(vfwmsac_vv_h, 2, 4, clearl) +GEN_VEXT_VV_ENV(vfwmsac_vv_w, 4, 8, clearq) +RVVCALL(OPFVF3, vfwmsac_vf_h, WOP_UUU_H, H4, H2, fwmsac16) +RVVCALL(OPFVF3, vfwmsac_vf_w, WOP_UUU_W, H8, H4, fwmsac32) +GEN_VEXT_VF(vfwmsac_vf_h, 2, 4, clearl) +GEN_VEXT_VF(vfwmsac_vf_w, 4, 8, clearq) + +static uint32_t fwnmsac16(uint16_t a, uint16_t b, uint32_t d, float_status *s) +{ + return 
float32_muladd(float16_to_float32(a, true, s), + float16_to_float32(b, true, s), d, + float_muladd_negate_product, s); +} + +static uint64_t fwnmsac32(uint32_t a, uint32_t b, uint64_t d, float_status *s) +{ + return float64_muladd(float32_to_float64(a, s), + float32_to_float64(b, s), d, + float_muladd_negate_product, s); +} + +RVVCALL(OPFVV3, vfwnmsac_vv_h, WOP_UUU_H, H4, H2, H2, fwnmsac16) +RVVCALL(OPFVV3, vfwnmsac_vv_w, WOP_UUU_W, H8, H4, H4, fwnmsac32) +GEN_VEXT_VV_ENV(vfwnmsac_vv_h, 2, 4, clearl) +GEN_VEXT_VV_ENV(vfwnmsac_vv_w, 4, 8, clearq) +RVVCALL(OPFVF3, vfwnmsac_vf_h, WOP_UUU_H, H4, H2, fwnmsac16) +RVVCALL(OPFVF3, vfwnmsac_vf_w, WOP_UUU_W, H8, H4, fwnmsac32) +GEN_VEXT_VF(vfwnmsac_vf_h, 2, 4, clearl) +GEN_VEXT_VF(vfwnmsac_vf_w, 4, 8, clearq) + +/* Vector Floating-Point Square-Root Instruction */ +/* (TD, T2, TX2) */ +#define OP_UU_H uint16_t, uint16_t, uint16_t +#define OP_UU_W uint32_t, uint32_t, uint32_t +#define OP_UU_D uint64_t, uint64_t, uint64_t + +#define OPFVV1(NAME, TD, T2, TX2, HD, HS2, OP) \ +static void do_##NAME(void *vd, void *vs2, int i, \ + CPURISCVState *env) \ +{ \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + *((TD *)vd + HD(i)) = OP(s2, &env->fp_status); \ +} + +#define GEN_VEXT_V_ENV(NAME, ESZ, DSZ, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t vlmax = vext_maxsz(desc) / ESZ; \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t i; \ + \ + if (vl == 0) { \ + return; \ + } \ + for (i = 0; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + do_##NAME(vd, vs2, i, env); \ + } \ + CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \ +} + +RVVCALL(OPFVV1, vfsqrt_v_h, OP_UU_H, H2, H2, float16_sqrt) +RVVCALL(OPFVV1, vfsqrt_v_w, OP_UU_W, H4, H4, float32_sqrt) +RVVCALL(OPFVV1, vfsqrt_v_d, OP_UU_D, H8, H8, float64_sqrt) +GEN_VEXT_V_ENV(vfsqrt_v_h, 2, 2, clearh) +GEN_VEXT_V_ENV(vfsqrt_v_w, 4, 4, clearl) +GEN_VEXT_V_ENV(vfsqrt_v_d, 8, 8, clearq) + +/* Vector Floating-Point MIN/MAX Instructions */ +RVVCALL(OPFVV2, vfmin_vv_h, OP_UUU_H, H2, H2, H2, float16_minnum) +RVVCALL(OPFVV2, vfmin_vv_w, OP_UUU_W, H4, H4, H4, float32_minnum) +RVVCALL(OPFVV2, vfmin_vv_d, OP_UUU_D, H8, H8, H8, float64_minnum) +GEN_VEXT_VV_ENV(vfmin_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfmin_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfmin_vv_d, 8, 8, clearq) +RVVCALL(OPFVF2, vfmin_vf_h, OP_UUU_H, H2, H2, float16_minnum) +RVVCALL(OPFVF2, vfmin_vf_w, OP_UUU_W, H4, H4, float32_minnum) +RVVCALL(OPFVF2, vfmin_vf_d, OP_UUU_D, H8, H8, float64_minnum) +GEN_VEXT_VF(vfmin_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfmin_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfmin_vf_d, 8, 8, clearq) + +RVVCALL(OPFVV2, vfmax_vv_h, OP_UUU_H, H2, H2, H2, float16_maxnum) +RVVCALL(OPFVV2, vfmax_vv_w, OP_UUU_W, H4, H4, H4, float32_maxnum) +RVVCALL(OPFVV2, vfmax_vv_d, OP_UUU_D, H8, H8, H8, float64_maxnum) +GEN_VEXT_VV_ENV(vfmax_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfmax_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfmax_vv_d, 8, 8, clearq) +RVVCALL(OPFVF2, vfmax_vf_h, OP_UUU_H, H2, H2, float16_maxnum) +RVVCALL(OPFVF2, vfmax_vf_w, OP_UUU_W, H4, H4, float32_maxnum) +RVVCALL(OPFVF2, vfmax_vf_d, OP_UUU_D, H8, H8, float64_maxnum) +GEN_VEXT_VF(vfmax_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfmax_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfmax_vf_d, 8, 8, clearq) + +/* Vector Floating-Point Sign-Injection Instructions */ +static uint16_t fsgnj16(uint16_t a, uint16_t b, float_status *s) +{ + return deposit64(b, 0, 15, a); +} + +static uint32_t 
fsgnj32(uint32_t a, uint32_t b, float_status *s) +{ + return deposit64(b, 0, 31, a); +} + +static uint64_t fsgnj64(uint64_t a, uint64_t b, float_status *s) +{ + return deposit64(b, 0, 63, a); +} + +RVVCALL(OPFVV2, vfsgnj_vv_h, OP_UUU_H, H2, H2, H2, fsgnj16) +RVVCALL(OPFVV2, vfsgnj_vv_w, OP_UUU_W, H4, H4, H4, fsgnj32) +RVVCALL(OPFVV2, vfsgnj_vv_d, OP_UUU_D, H8, H8, H8, fsgnj64) +GEN_VEXT_VV_ENV(vfsgnj_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfsgnj_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfsgnj_vv_d, 8, 8, clearq) +RVVCALL(OPFVF2, vfsgnj_vf_h, OP_UUU_H, H2, H2, fsgnj16) +RVVCALL(OPFVF2, vfsgnj_vf_w, OP_UUU_W, H4, H4, fsgnj32) +RVVCALL(OPFVF2, vfsgnj_vf_d, OP_UUU_D, H8, H8, fsgnj64) +GEN_VEXT_VF(vfsgnj_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfsgnj_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfsgnj_vf_d, 8, 8, clearq) + +static uint16_t fsgnjn16(uint16_t a, uint16_t b, float_status *s) +{ + return deposit64(~b, 0, 15, a); +} + +static uint32_t fsgnjn32(uint32_t a, uint32_t b, float_status *s) +{ + return deposit64(~b, 0, 31, a); +} + +static uint64_t fsgnjn64(uint64_t a, uint64_t b, float_status *s) +{ + return deposit64(~b, 0, 63, a); +} + +RVVCALL(OPFVV2, vfsgnjn_vv_h, OP_UUU_H, H2, H2, H2, fsgnjn16) +RVVCALL(OPFVV2, vfsgnjn_vv_w, OP_UUU_W, H4, H4, H4, fsgnjn32) +RVVCALL(OPFVV2, vfsgnjn_vv_d, OP_UUU_D, H8, H8, H8, fsgnjn64) +GEN_VEXT_VV_ENV(vfsgnjn_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfsgnjn_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfsgnjn_vv_d, 8, 8, clearq) +RVVCALL(OPFVF2, vfsgnjn_vf_h, OP_UUU_H, H2, H2, fsgnjn16) +RVVCALL(OPFVF2, vfsgnjn_vf_w, OP_UUU_W, H4, H4, fsgnjn32) +RVVCALL(OPFVF2, vfsgnjn_vf_d, OP_UUU_D, H8, H8, fsgnjn64) +GEN_VEXT_VF(vfsgnjn_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfsgnjn_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfsgnjn_vf_d, 8, 8, clearq) + +static uint16_t fsgnjx16(uint16_t a, uint16_t b, float_status *s) +{ + return deposit64(b ^ a, 0, 15, a); +} + +static uint32_t fsgnjx32(uint32_t a, uint32_t b, float_status *s) +{ + return deposit64(b ^ a, 0, 31, a); +} + +static uint64_t fsgnjx64(uint64_t a, uint64_t b, float_status *s) +{ + return deposit64(b ^ a, 0, 63, a); +} + +RVVCALL(OPFVV2, vfsgnjx_vv_h, OP_UUU_H, H2, H2, H2, fsgnjx16) +RVVCALL(OPFVV2, vfsgnjx_vv_w, OP_UUU_W, H4, H4, H4, fsgnjx32) +RVVCALL(OPFVV2, vfsgnjx_vv_d, OP_UUU_D, H8, H8, H8, fsgnjx64) +GEN_VEXT_VV_ENV(vfsgnjx_vv_h, 2, 2, clearh) +GEN_VEXT_VV_ENV(vfsgnjx_vv_w, 4, 4, clearl) +GEN_VEXT_VV_ENV(vfsgnjx_vv_d, 8, 8, clearq) +RVVCALL(OPFVF2, vfsgnjx_vf_h, OP_UUU_H, H2, H2, fsgnjx16) +RVVCALL(OPFVF2, vfsgnjx_vf_w, OP_UUU_W, H4, H4, fsgnjx32) +RVVCALL(OPFVF2, vfsgnjx_vf_d, OP_UUU_D, H8, H8, fsgnjx64) +GEN_VEXT_VF(vfsgnjx_vf_h, 2, 2, clearh) +GEN_VEXT_VF(vfsgnjx_vf_w, 4, 4, clearl) +GEN_VEXT_VF(vfsgnjx_vf_d, 8, 8, clearq) + +/* Vector Floating-Point Compare Instructions */ +#define GEN_VEXT_CMP_VV_ENV(NAME, ETYPE, H, DO_OP) \ +void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + ETYPE s1 = *((ETYPE *)vs1 + H(i)); \ + ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + vext_set_elem_mask(vd, mlen, i, \ + DO_OP(s2, s1, &env->fp_status)); \ + } \ + for (; i < vlmax; i++) { \ + vext_set_elem_mask(vd, mlen, i, 0); \ + } \ +} + +static bool float16_eq_quiet(uint16_t a, uint16_t b, float_status *s) +{ + FloatRelation compare = 
float16_compare_quiet(a, b, s); + return compare == float_relation_equal; +} + +GEN_VEXT_CMP_VV_ENV(vmfeq_vv_h, uint16_t, H2, float16_eq_quiet) +GEN_VEXT_CMP_VV_ENV(vmfeq_vv_w, uint32_t, H4, float32_eq_quiet) +GEN_VEXT_CMP_VV_ENV(vmfeq_vv_d, uint64_t, H8, float64_eq_quiet) + +#define GEN_VEXT_CMP_VF(NAME, ETYPE, H, DO_OP) \ +void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t vlmax = vext_maxsz(desc) / sizeof(ETYPE); \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + vext_set_elem_mask(vd, mlen, i, \ + DO_OP(s2, (ETYPE)s1, &env->fp_status)); \ + } \ + for (; i < vlmax; i++) { \ + vext_set_elem_mask(vd, mlen, i, 0); \ + } \ +} + +GEN_VEXT_CMP_VF(vmfeq_vf_h, uint16_t, H2, float16_eq_quiet) +GEN_VEXT_CMP_VF(vmfeq_vf_w, uint32_t, H4, float32_eq_quiet) +GEN_VEXT_CMP_VF(vmfeq_vf_d, uint64_t, H8, float64_eq_quiet) + +static bool vmfne16(uint16_t a, uint16_t b, float_status *s) +{ + FloatRelation compare = float16_compare_quiet(a, b, s); + return compare != float_relation_equal; +} + +static bool vmfne32(uint32_t a, uint32_t b, float_status *s) +{ + FloatRelation compare = float32_compare_quiet(a, b, s); + return compare != float_relation_equal; +} + +static bool vmfne64(uint64_t a, uint64_t b, float_status *s) +{ + FloatRelation compare = float64_compare_quiet(a, b, s); + return compare != float_relation_equal; +} + +GEN_VEXT_CMP_VV_ENV(vmfne_vv_h, uint16_t, H2, vmfne16) +GEN_VEXT_CMP_VV_ENV(vmfne_vv_w, uint32_t, H4, vmfne32) +GEN_VEXT_CMP_VV_ENV(vmfne_vv_d, uint64_t, H8, vmfne64) +GEN_VEXT_CMP_VF(vmfne_vf_h, uint16_t, H2, vmfne16) +GEN_VEXT_CMP_VF(vmfne_vf_w, uint32_t, H4, vmfne32) +GEN_VEXT_CMP_VF(vmfne_vf_d, uint64_t, H8, vmfne64) + +static bool float16_lt(uint16_t a, uint16_t b, float_status *s) +{ + FloatRelation compare = float16_compare(a, b, s); + return compare == float_relation_less; +} + +GEN_VEXT_CMP_VV_ENV(vmflt_vv_h, uint16_t, H2, float16_lt) +GEN_VEXT_CMP_VV_ENV(vmflt_vv_w, uint32_t, H4, float32_lt) +GEN_VEXT_CMP_VV_ENV(vmflt_vv_d, uint64_t, H8, float64_lt) +GEN_VEXT_CMP_VF(vmflt_vf_h, uint16_t, H2, float16_lt) +GEN_VEXT_CMP_VF(vmflt_vf_w, uint32_t, H4, float32_lt) +GEN_VEXT_CMP_VF(vmflt_vf_d, uint64_t, H8, float64_lt) + +static bool float16_le(uint16_t a, uint16_t b, float_status *s) +{ + FloatRelation compare = float16_compare(a, b, s); + return compare == float_relation_less || + compare == float_relation_equal; +} + +GEN_VEXT_CMP_VV_ENV(vmfle_vv_h, uint16_t, H2, float16_le) +GEN_VEXT_CMP_VV_ENV(vmfle_vv_w, uint32_t, H4, float32_le) +GEN_VEXT_CMP_VV_ENV(vmfle_vv_d, uint64_t, H8, float64_le) +GEN_VEXT_CMP_VF(vmfle_vf_h, uint16_t, H2, float16_le) +GEN_VEXT_CMP_VF(vmfle_vf_w, uint32_t, H4, float32_le) +GEN_VEXT_CMP_VF(vmfle_vf_d, uint64_t, H8, float64_le) + +static bool vmfgt16(uint16_t a, uint16_t b, float_status *s) +{ + FloatRelation compare = float16_compare(a, b, s); + return compare == float_relation_greater; +} + +static bool vmfgt32(uint32_t a, uint32_t b, float_status *s) +{ + FloatRelation compare = float32_compare(a, b, s); + return compare == float_relation_greater; +} + +static bool vmfgt64(uint64_t a, uint64_t b, float_status *s) +{ + FloatRelation compare = float64_compare(a, b, s); + return compare == float_relation_greater; +} + +GEN_VEXT_CMP_VF(vmfgt_vf_h, uint16_t, H2, vmfgt16) 
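/*
 * Editorial note, not part of this patch: vmfeq/vmfne above are built on
 * the quiet compare helpers (float*_compare_quiet), which raise the
 * invalid exception only for signalling NaNs, whereas vmflt/vmfle above
 * and the vmfgt/vmfge predicates generated here use the signalling
 * compare (float*_compare), which sets the invalid flag whenever either
 * operand is any NaN. A NaN operand always yields the "unordered"
 * relation, so the ordered predicates produce 0 and only vmfne produces 1.
 */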
+GEN_VEXT_CMP_VF(vmfgt_vf_w, uint32_t, H4, vmfgt32) +GEN_VEXT_CMP_VF(vmfgt_vf_d, uint64_t, H8, vmfgt64) + +static bool vmfge16(uint16_t a, uint16_t b, float_status *s) +{ + FloatRelation compare = float16_compare(a, b, s); + return compare == float_relation_greater || + compare == float_relation_equal; +} + +static bool vmfge32(uint32_t a, uint32_t b, float_status *s) +{ + FloatRelation compare = float32_compare(a, b, s); + return compare == float_relation_greater || + compare == float_relation_equal; +} + +static bool vmfge64(uint64_t a, uint64_t b, float_status *s) +{ + FloatRelation compare = float64_compare(a, b, s); + return compare == float_relation_greater || + compare == float_relation_equal; +} + +GEN_VEXT_CMP_VF(vmfge_vf_h, uint16_t, H2, vmfge16) +GEN_VEXT_CMP_VF(vmfge_vf_w, uint32_t, H4, vmfge32) +GEN_VEXT_CMP_VF(vmfge_vf_d, uint64_t, H8, vmfge64) + +static bool float16_unordered_quiet(uint16_t a, uint16_t b, float_status *s) +{ + FloatRelation compare = float16_compare_quiet(a, b, s); + return compare == float_relation_unordered; +} + +GEN_VEXT_CMP_VV_ENV(vmford_vv_h, uint16_t, H2, !float16_unordered_quiet) +GEN_VEXT_CMP_VV_ENV(vmford_vv_w, uint32_t, H4, !float32_unordered_quiet) +GEN_VEXT_CMP_VV_ENV(vmford_vv_d, uint64_t, H8, !float64_unordered_quiet) +GEN_VEXT_CMP_VF(vmford_vf_h, uint16_t, H2, !float16_unordered_quiet) +GEN_VEXT_CMP_VF(vmford_vf_w, uint32_t, H4, !float32_unordered_quiet) +GEN_VEXT_CMP_VF(vmford_vf_d, uint64_t, H8, !float64_unordered_quiet) + +/* Vector Floating-Point Classify Instruction */ +#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \ +static void do_##NAME(void *vd, void *vs2, int i) \ +{ \ + TX2 s2 = *((T2 *)vs2 + HS2(i)); \ + *((TD *)vd + HD(i)) = OP(s2); \ +} + +#define GEN_VEXT_V(NAME, ESZ, DSZ, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t vlmax = vext_maxsz(desc) / ESZ; \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + do_##NAME(vd, vs2, i); \ + } \ + CLEAR_FN(vd, vl, vl * DSZ, vlmax * DSZ); \ +} + +target_ulong fclass_h(uint64_t frs1) +{ + float16 f = frs1; + bool sign = float16_is_neg(f); + + if (float16_is_infinity(f)) { + return sign ? 1 << 0 : 1 << 7; + } else if (float16_is_zero(f)) { + return sign ? 1 << 3 : 1 << 4; + } else if (float16_is_zero_or_denormal(f)) { + return sign ? 1 << 2 : 1 << 5; + } else if (float16_is_any_nan(f)) { + float_status s = { }; /* for snan_bit_is_one */ + return float16_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8; + } else { + return sign ? 1 << 1 : 1 << 6; + } +} + +target_ulong fclass_s(uint64_t frs1) +{ + float32 f = frs1; + bool sign = float32_is_neg(f); + + if (float32_is_infinity(f)) { + return sign ? 1 << 0 : 1 << 7; + } else if (float32_is_zero(f)) { + return sign ? 1 << 3 : 1 << 4; + } else if (float32_is_zero_or_denormal(f)) { + return sign ? 1 << 2 : 1 << 5; + } else if (float32_is_any_nan(f)) { + float_status s = { }; /* for snan_bit_is_one */ + return float32_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8; + } else { + return sign ? 1 << 1 : 1 << 6; + } +} + +target_ulong fclass_d(uint64_t frs1) +{ + float64 f = frs1; + bool sign = float64_is_neg(f); + + if (float64_is_infinity(f)) { + return sign ? 1 << 0 : 1 << 7; + } else if (float64_is_zero(f)) { + return sign ? 1 << 3 : 1 << 4; + } else if (float64_is_zero_or_denormal(f)) { + return sign ? 
1 << 2 : 1 << 5; + } else if (float64_is_any_nan(f)) { + float_status s = { }; /* for snan_bit_is_one */ + return float64_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8; + } else { + return sign ? 1 << 1 : 1 << 6; + } +} + +RVVCALL(OPIVV1, vfclass_v_h, OP_UU_H, H2, H2, fclass_h) +RVVCALL(OPIVV1, vfclass_v_w, OP_UU_W, H4, H4, fclass_s) +RVVCALL(OPIVV1, vfclass_v_d, OP_UU_D, H8, H8, fclass_d) +GEN_VEXT_V(vfclass_v_h, 2, 2, clearh) +GEN_VEXT_V(vfclass_v_w, 4, 4, clearl) +GEN_VEXT_V(vfclass_v_d, 8, 8, clearq) + +/* Vector Floating-Point Merge Instruction */ +#define GEN_VFMERGE_VF(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t esz = sizeof(ETYPE); \ + uint32_t vlmax = vext_maxsz(desc) / esz; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + ETYPE s2 = *((ETYPE *)vs2 + H(i)); \ + *((ETYPE *)vd + H(i)) \ + = (!vm && !vext_elem_mask(v0, mlen, i) ? s2 : s1); \ + } \ + CLEAR_FN(vd, vl, vl * esz, vlmax * esz); \ +} + +GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2, clearh) +GEN_VFMERGE_VF(vfmerge_vfm_w, int32_t, H4, clearl) +GEN_VFMERGE_VF(vfmerge_vfm_d, int64_t, H8, clearq) + +/* Single-Width Floating-Point/Integer Type-Convert Instructions */ +/* vfcvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */ +RVVCALL(OPFVV1, vfcvt_xu_f_v_h, OP_UU_H, H2, H2, float16_to_uint16) +RVVCALL(OPFVV1, vfcvt_xu_f_v_w, OP_UU_W, H4, H4, float32_to_uint32) +RVVCALL(OPFVV1, vfcvt_xu_f_v_d, OP_UU_D, H8, H8, float64_to_uint64) +GEN_VEXT_V_ENV(vfcvt_xu_f_v_h, 2, 2, clearh) +GEN_VEXT_V_ENV(vfcvt_xu_f_v_w, 4, 4, clearl) +GEN_VEXT_V_ENV(vfcvt_xu_f_v_d, 8, 8, clearq) + +/* vfcvt.x.f.v vd, vs2, vm # Convert float to signed integer. */ +RVVCALL(OPFVV1, vfcvt_x_f_v_h, OP_UU_H, H2, H2, float16_to_int16) +RVVCALL(OPFVV1, vfcvt_x_f_v_w, OP_UU_W, H4, H4, float32_to_int32) +RVVCALL(OPFVV1, vfcvt_x_f_v_d, OP_UU_D, H8, H8, float64_to_int64) +GEN_VEXT_V_ENV(vfcvt_x_f_v_h, 2, 2, clearh) +GEN_VEXT_V_ENV(vfcvt_x_f_v_w, 4, 4, clearl) +GEN_VEXT_V_ENV(vfcvt_x_f_v_d, 8, 8, clearq) + +/* vfcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to float. */ +RVVCALL(OPFVV1, vfcvt_f_xu_v_h, OP_UU_H, H2, H2, uint16_to_float16) +RVVCALL(OPFVV1, vfcvt_f_xu_v_w, OP_UU_W, H4, H4, uint32_to_float32) +RVVCALL(OPFVV1, vfcvt_f_xu_v_d, OP_UU_D, H8, H8, uint64_to_float64) +GEN_VEXT_V_ENV(vfcvt_f_xu_v_h, 2, 2, clearh) +GEN_VEXT_V_ENV(vfcvt_f_xu_v_w, 4, 4, clearl) +GEN_VEXT_V_ENV(vfcvt_f_xu_v_d, 8, 8, clearq) + +/* vfcvt.f.x.v vd, vs2, vm # Convert integer to float. */ +RVVCALL(OPFVV1, vfcvt_f_x_v_h, OP_UU_H, H2, H2, int16_to_float16) +RVVCALL(OPFVV1, vfcvt_f_x_v_w, OP_UU_W, H4, H4, int32_to_float32) +RVVCALL(OPFVV1, vfcvt_f_x_v_d, OP_UU_D, H8, H8, int64_to_float64) +GEN_VEXT_V_ENV(vfcvt_f_x_v_h, 2, 2, clearh) +GEN_VEXT_V_ENV(vfcvt_f_x_v_w, 4, 4, clearl) +GEN_VEXT_V_ENV(vfcvt_f_x_v_d, 8, 8, clearq) + +/* Widening Floating-Point/Integer Type-Convert Instructions */ +/* (TD, T2, TX2) */ +#define WOP_UU_H uint32_t, uint16_t, uint16_t +#define WOP_UU_W uint64_t, uint32_t, uint32_t +/* vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer.*/ +RVVCALL(OPFVV1, vfwcvt_xu_f_v_h, WOP_UU_H, H4, H2, float16_to_uint32) +RVVCALL(OPFVV1, vfwcvt_xu_f_v_w, WOP_UU_W, H8, H4, float32_to_uint64) +GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h, 2, 4, clearl) +GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w, 4, 8, clearq) + +/* vfwcvt.x.f.v vd, vs2, vm # Convert float to double-width signed integer. 
*/ +RVVCALL(OPFVV1, vfwcvt_x_f_v_h, WOP_UU_H, H4, H2, float16_to_int32) +RVVCALL(OPFVV1, vfwcvt_x_f_v_w, WOP_UU_W, H8, H4, float32_to_int64) +GEN_VEXT_V_ENV(vfwcvt_x_f_v_h, 2, 4, clearl) +GEN_VEXT_V_ENV(vfwcvt_x_f_v_w, 4, 8, clearq) + +/* vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width float */ +RVVCALL(OPFVV1, vfwcvt_f_xu_v_h, WOP_UU_H, H4, H2, uint16_to_float32) +RVVCALL(OPFVV1, vfwcvt_f_xu_v_w, WOP_UU_W, H8, H4, uint32_to_float64) +GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h, 2, 4, clearl) +GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w, 4, 8, clearq) + +/* vfwcvt.f.x.v vd, vs2, vm # Convert integer to double-width float. */ +RVVCALL(OPFVV1, vfwcvt_f_x_v_h, WOP_UU_H, H4, H2, int16_to_float32) +RVVCALL(OPFVV1, vfwcvt_f_x_v_w, WOP_UU_W, H8, H4, int32_to_float64) +GEN_VEXT_V_ENV(vfwcvt_f_x_v_h, 2, 4, clearl) +GEN_VEXT_V_ENV(vfwcvt_f_x_v_w, 4, 8, clearq) + +/* + * vfwcvt.f.f.v vd, vs2, vm # + * Convert single-width float to double-width float. + */ +static uint32_t vfwcvtffv16(uint16_t a, float_status *s) +{ + return float16_to_float32(a, true, s); +} + +RVVCALL(OPFVV1, vfwcvt_f_f_v_h, WOP_UU_H, H4, H2, vfwcvtffv16) +RVVCALL(OPFVV1, vfwcvt_f_f_v_w, WOP_UU_W, H8, H4, float32_to_float64) +GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 2, 4, clearl) +GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 4, 8, clearq) + +/* Narrowing Floating-Point/Integer Type-Convert Instructions */ +/* (TD, T2, TX2) */ +#define NOP_UU_H uint16_t, uint32_t, uint32_t +#define NOP_UU_W uint32_t, uint64_t, uint64_t +/* vfncvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */ +RVVCALL(OPFVV1, vfncvt_xu_f_v_h, NOP_UU_H, H2, H4, float32_to_uint16) +RVVCALL(OPFVV1, vfncvt_xu_f_v_w, NOP_UU_W, H4, H8, float64_to_uint32) +GEN_VEXT_V_ENV(vfncvt_xu_f_v_h, 2, 2, clearh) +GEN_VEXT_V_ENV(vfncvt_xu_f_v_w, 4, 4, clearl) + +/* vfncvt.x.f.v vd, vs2, vm # Convert double-width float to signed integer. */ +RVVCALL(OPFVV1, vfncvt_x_f_v_h, NOP_UU_H, H2, H4, float32_to_int16) +RVVCALL(OPFVV1, vfncvt_x_f_v_w, NOP_UU_W, H4, H8, float64_to_int32) +GEN_VEXT_V_ENV(vfncvt_x_f_v_h, 2, 2, clearh) +GEN_VEXT_V_ENV(vfncvt_x_f_v_w, 4, 4, clearl) + +/* vfncvt.f.xu.v vd, vs2, vm # Convert double-width unsigned integer to float */ +RVVCALL(OPFVV1, vfncvt_f_xu_v_h, NOP_UU_H, H2, H4, uint32_to_float16) +RVVCALL(OPFVV1, vfncvt_f_xu_v_w, NOP_UU_W, H4, H8, uint64_to_float32) +GEN_VEXT_V_ENV(vfncvt_f_xu_v_h, 2, 2, clearh) +GEN_VEXT_V_ENV(vfncvt_f_xu_v_w, 4, 4, clearl) + +/* vfncvt.f.x.v vd, vs2, vm # Convert double-width integer to float. */ +RVVCALL(OPFVV1, vfncvt_f_x_v_h, NOP_UU_H, H2, H4, int32_to_float16) +RVVCALL(OPFVV1, vfncvt_f_x_v_w, NOP_UU_W, H4, H8, int64_to_float32) +GEN_VEXT_V_ENV(vfncvt_f_x_v_h, 2, 2, clearh) +GEN_VEXT_V_ENV(vfncvt_f_x_v_w, 4, 4, clearl) + +/* vfncvt.f.f.v vd, vs2, vm # Convert double float to single-width float. 
*/ +static uint16_t vfncvtffv16(uint32_t a, float_status *s) +{ + return float32_to_float16(a, true, s); +} + +RVVCALL(OPFVV1, vfncvt_f_f_v_h, NOP_UU_H, H2, H4, vfncvtffv16) +RVVCALL(OPFVV1, vfncvt_f_f_v_w, NOP_UU_W, H4, H8, float64_to_float32) +GEN_VEXT_V_ENV(vfncvt_f_f_v_h, 2, 2, clearh) +GEN_VEXT_V_ENV(vfncvt_f_f_v_w, 4, 4, clearl) + +/* + *** Vector Reduction Operations + */ +/* Vector Single-Width Integer Reduction Instructions */ +#define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\ +void HELPER(NAME)(void *vd, void *v0, void *vs1, \ + void *vs2, CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t i; \ + uint32_t tot = env_archcpu(env)->cfg.vlen / 8; \ + TD s1 = *((TD *)vs1 + HD(0)); \ + \ + for (i = 0; i < vl; i++) { \ + TS2 s2 = *((TS2 *)vs2 + HS2(i)); \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + s1 = OP(s1, (TD)s2); \ + } \ + *((TD *)vd + HD(0)) = s1; \ + CLEAR_FN(vd, 1, sizeof(TD), tot); \ +} + +/* vd[0] = sum(vs1[0], vs2[*]) */ +GEN_VEXT_RED(vredsum_vs_b, int8_t, int8_t, H1, H1, DO_ADD, clearb) +GEN_VEXT_RED(vredsum_vs_h, int16_t, int16_t, H2, H2, DO_ADD, clearh) +GEN_VEXT_RED(vredsum_vs_w, int32_t, int32_t, H4, H4, DO_ADD, clearl) +GEN_VEXT_RED(vredsum_vs_d, int64_t, int64_t, H8, H8, DO_ADD, clearq) + +/* vd[0] = maxu(vs1[0], vs2[*]) */ +GEN_VEXT_RED(vredmaxu_vs_b, uint8_t, uint8_t, H1, H1, DO_MAX, clearb) +GEN_VEXT_RED(vredmaxu_vs_h, uint16_t, uint16_t, H2, H2, DO_MAX, clearh) +GEN_VEXT_RED(vredmaxu_vs_w, uint32_t, uint32_t, H4, H4, DO_MAX, clearl) +GEN_VEXT_RED(vredmaxu_vs_d, uint64_t, uint64_t, H8, H8, DO_MAX, clearq) + +/* vd[0] = max(vs1[0], vs2[*]) */ +GEN_VEXT_RED(vredmax_vs_b, int8_t, int8_t, H1, H1, DO_MAX, clearb) +GEN_VEXT_RED(vredmax_vs_h, int16_t, int16_t, H2, H2, DO_MAX, clearh) +GEN_VEXT_RED(vredmax_vs_w, int32_t, int32_t, H4, H4, DO_MAX, clearl) +GEN_VEXT_RED(vredmax_vs_d, int64_t, int64_t, H8, H8, DO_MAX, clearq) + +/* vd[0] = minu(vs1[0], vs2[*]) */ +GEN_VEXT_RED(vredminu_vs_b, uint8_t, uint8_t, H1, H1, DO_MIN, clearb) +GEN_VEXT_RED(vredminu_vs_h, uint16_t, uint16_t, H2, H2, DO_MIN, clearh) +GEN_VEXT_RED(vredminu_vs_w, uint32_t, uint32_t, H4, H4, DO_MIN, clearl) +GEN_VEXT_RED(vredminu_vs_d, uint64_t, uint64_t, H8, H8, DO_MIN, clearq) + +/* vd[0] = min(vs1[0], vs2[*]) */ +GEN_VEXT_RED(vredmin_vs_b, int8_t, int8_t, H1, H1, DO_MIN, clearb) +GEN_VEXT_RED(vredmin_vs_h, int16_t, int16_t, H2, H2, DO_MIN, clearh) +GEN_VEXT_RED(vredmin_vs_w, int32_t, int32_t, H4, H4, DO_MIN, clearl) +GEN_VEXT_RED(vredmin_vs_d, int64_t, int64_t, H8, H8, DO_MIN, clearq) + +/* vd[0] = and(vs1[0], vs2[*]) */ +GEN_VEXT_RED(vredand_vs_b, int8_t, int8_t, H1, H1, DO_AND, clearb) +GEN_VEXT_RED(vredand_vs_h, int16_t, int16_t, H2, H2, DO_AND, clearh) +GEN_VEXT_RED(vredand_vs_w, int32_t, int32_t, H4, H4, DO_AND, clearl) +GEN_VEXT_RED(vredand_vs_d, int64_t, int64_t, H8, H8, DO_AND, clearq) + +/* vd[0] = or(vs1[0], vs2[*]) */ +GEN_VEXT_RED(vredor_vs_b, int8_t, int8_t, H1, H1, DO_OR, clearb) +GEN_VEXT_RED(vredor_vs_h, int16_t, int16_t, H2, H2, DO_OR, clearh) +GEN_VEXT_RED(vredor_vs_w, int32_t, int32_t, H4, H4, DO_OR, clearl) +GEN_VEXT_RED(vredor_vs_d, int64_t, int64_t, H8, H8, DO_OR, clearq) + +/* vd[0] = xor(vs1[0], vs2[*]) */ +GEN_VEXT_RED(vredxor_vs_b, int8_t, int8_t, H1, H1, DO_XOR, clearb) +GEN_VEXT_RED(vredxor_vs_h, int16_t, int16_t, H2, H2, DO_XOR, clearh) +GEN_VEXT_RED(vredxor_vs_w, int32_t, int32_t, H4, H4, DO_XOR, clearl) +GEN_VEXT_RED(vredxor_vs_d, int64_t, 
int64_t, H8, H8, DO_XOR, clearq) + +/* Vector Widening Integer Reduction Instructions */ +/* signed sum reduction into double-width accumulator */ +GEN_VEXT_RED(vwredsum_vs_b, int16_t, int8_t, H2, H1, DO_ADD, clearh) +GEN_VEXT_RED(vwredsum_vs_h, int32_t, int16_t, H4, H2, DO_ADD, clearl) +GEN_VEXT_RED(vwredsum_vs_w, int64_t, int32_t, H8, H4, DO_ADD, clearq) + +/* Unsigned sum reduction into double-width accumulator */ +GEN_VEXT_RED(vwredsumu_vs_b, uint16_t, uint8_t, H2, H1, DO_ADD, clearh) +GEN_VEXT_RED(vwredsumu_vs_h, uint32_t, uint16_t, H4, H2, DO_ADD, clearl) +GEN_VEXT_RED(vwredsumu_vs_w, uint64_t, uint32_t, H8, H4, DO_ADD, clearq) + +/* Vector Single-Width Floating-Point Reduction Instructions */ +#define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, OP, CLEAR_FN)\ +void HELPER(NAME)(void *vd, void *v0, void *vs1, \ + void *vs2, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t i; \ + uint32_t tot = env_archcpu(env)->cfg.vlen / 8; \ + TD s1 = *((TD *)vs1 + HD(0)); \ + \ + for (i = 0; i < vl; i++) { \ + TS2 s2 = *((TS2 *)vs2 + HS2(i)); \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + s1 = OP(s1, (TD)s2, &env->fp_status); \ + } \ + *((TD *)vd + HD(0)) = s1; \ + CLEAR_FN(vd, 1, sizeof(TD), tot); \ +} + +/* Unordered sum */ +GEN_VEXT_FRED(vfredsum_vs_h, uint16_t, uint16_t, H2, H2, float16_add, clearh) +GEN_VEXT_FRED(vfredsum_vs_w, uint32_t, uint32_t, H4, H4, float32_add, clearl) +GEN_VEXT_FRED(vfredsum_vs_d, uint64_t, uint64_t, H8, H8, float64_add, clearq) + +/* Maximum value */ +GEN_VEXT_FRED(vfredmax_vs_h, uint16_t, uint16_t, H2, H2, float16_maxnum, clearh) +GEN_VEXT_FRED(vfredmax_vs_w, uint32_t, uint32_t, H4, H4, float32_maxnum, clearl) +GEN_VEXT_FRED(vfredmax_vs_d, uint64_t, uint64_t, H8, H8, float64_maxnum, clearq) + +/* Minimum value */ +GEN_VEXT_FRED(vfredmin_vs_h, uint16_t, uint16_t, H2, H2, float16_minnum, clearh) +GEN_VEXT_FRED(vfredmin_vs_w, uint32_t, uint32_t, H4, H4, float32_minnum, clearl) +GEN_VEXT_FRED(vfredmin_vs_d, uint64_t, uint64_t, H8, H8, float64_minnum, clearq) + +/* Vector Widening Floating-Point Reduction Instructions */ +/* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */ +void HELPER(vfwredsum_vs_h)(void *vd, void *v0, void *vs1, + void *vs2, CPURISCVState *env, uint32_t desc) +{ + uint32_t mlen = vext_mlen(desc); + uint32_t vm = vext_vm(desc); + uint32_t vl = env->vl; + uint32_t i; + uint32_t tot = env_archcpu(env)->cfg.vlen / 8; + uint32_t s1 = *((uint32_t *)vs1 + H4(0)); + + for (i = 0; i < vl; i++) { + uint16_t s2 = *((uint16_t *)vs2 + H2(i)); + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + s1 = float32_add(s1, float16_to_float32(s2, true, &env->fp_status), + &env->fp_status); + } + *((uint32_t *)vd + H4(0)) = s1; + clearl(vd, 1, sizeof(uint32_t), tot); +} + +void HELPER(vfwredsum_vs_w)(void *vd, void *v0, void *vs1, + void *vs2, CPURISCVState *env, uint32_t desc) +{ + uint32_t mlen = vext_mlen(desc); + uint32_t vm = vext_vm(desc); + uint32_t vl = env->vl; + uint32_t i; + uint32_t tot = env_archcpu(env)->cfg.vlen / 8; + uint64_t s1 = *((uint64_t *)vs1); + + for (i = 0; i < vl; i++) { + uint32_t s2 = *((uint32_t *)vs2 + H4(i)); + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + s1 = float64_add(s1, float32_to_float64(s2, &env->fp_status), + &env->fp_status); + } + *((uint64_t *)vd) = s1; + clearq(vd, 1, sizeof(uint64_t), tot); +} + +/* + *** Vector Mask Operations + */ +/* Vector Mask-Register Logical Instructions 
*/ +#define GEN_VEXT_MASK_VV(NAME, OP) \ +void HELPER(NAME)(void *vd, void *v0, void *vs1, \ + void *vs2, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vl = env->vl; \ + uint32_t i; \ + int a, b; \ + \ + for (i = 0; i < vl; i++) { \ + a = vext_elem_mask(vs1, mlen, i); \ + b = vext_elem_mask(vs2, mlen, i); \ + vext_set_elem_mask(vd, mlen, i, OP(b, a)); \ + } \ + for (; i < vlmax; i++) { \ + vext_set_elem_mask(vd, mlen, i, 0); \ + } \ +} + +#define DO_NAND(N, M) (!(N & M)) +#define DO_ANDNOT(N, M) (N & !M) +#define DO_NOR(N, M) (!(N | M)) +#define DO_ORNOT(N, M) (N | !M) +#define DO_XNOR(N, M) (!(N ^ M)) + +GEN_VEXT_MASK_VV(vmand_mm, DO_AND) +GEN_VEXT_MASK_VV(vmnand_mm, DO_NAND) +GEN_VEXT_MASK_VV(vmandnot_mm, DO_ANDNOT) +GEN_VEXT_MASK_VV(vmxor_mm, DO_XOR) +GEN_VEXT_MASK_VV(vmor_mm, DO_OR) +GEN_VEXT_MASK_VV(vmnor_mm, DO_NOR) +GEN_VEXT_MASK_VV(vmornot_mm, DO_ORNOT) +GEN_VEXT_MASK_VV(vmxnor_mm, DO_XNOR) + +/* Vector mask population count vmpopc */ +target_ulong HELPER(vmpopc_m)(void *v0, void *vs2, CPURISCVState *env, + uint32_t desc) +{ + target_ulong cnt = 0; + uint32_t mlen = vext_mlen(desc); + uint32_t vm = vext_vm(desc); + uint32_t vl = env->vl; + int i; + + for (i = 0; i < vl; i++) { + if (vm || vext_elem_mask(v0, mlen, i)) { + if (vext_elem_mask(vs2, mlen, i)) { + cnt++; + } + } + } + return cnt; +} + +/* vmfirst find-first-set mask bit*/ +target_ulong HELPER(vmfirst_m)(void *v0, void *vs2, CPURISCVState *env, + uint32_t desc) +{ + uint32_t mlen = vext_mlen(desc); + uint32_t vm = vext_vm(desc); + uint32_t vl = env->vl; + int i; + + for (i = 0; i < vl; i++) { + if (vm || vext_elem_mask(v0, mlen, i)) { + if (vext_elem_mask(vs2, mlen, i)) { + return i; + } + } + } + return -1LL; +} + +enum set_mask_type { + ONLY_FIRST = 1, + INCLUDE_FIRST, + BEFORE_FIRST, +}; + +static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env, + uint32_t desc, enum set_mask_type type) +{ + uint32_t mlen = vext_mlen(desc); + uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; + uint32_t vm = vext_vm(desc); + uint32_t vl = env->vl; + int i; + bool first_mask_bit = false; + + for (i = 0; i < vl; i++) { + if (!vm && !vext_elem_mask(v0, mlen, i)) { + continue; + } + /* write a zero to all following active elements */ + if (first_mask_bit) { + vext_set_elem_mask(vd, mlen, i, 0); + continue; + } + if (vext_elem_mask(vs2, mlen, i)) { + first_mask_bit = true; + if (type == BEFORE_FIRST) { + vext_set_elem_mask(vd, mlen, i, 0); + } else { + vext_set_elem_mask(vd, mlen, i, 1); + } + } else { + if (type == ONLY_FIRST) { + vext_set_elem_mask(vd, mlen, i, 0); + } else { + vext_set_elem_mask(vd, mlen, i, 1); + } + } + } + for (; i < vlmax; i++) { + vext_set_elem_mask(vd, mlen, i, 0); + } +} + +void HELPER(vmsbf_m)(void *vd, void *v0, void *vs2, CPURISCVState *env, + uint32_t desc) +{ + vmsetm(vd, v0, vs2, env, desc, BEFORE_FIRST); +} + +void HELPER(vmsif_m)(void *vd, void *v0, void *vs2, CPURISCVState *env, + uint32_t desc) +{ + vmsetm(vd, v0, vs2, env, desc, INCLUDE_FIRST); +} + +void HELPER(vmsof_m)(void *vd, void *v0, void *vs2, CPURISCVState *env, + uint32_t desc) +{ + vmsetm(vd, v0, vs2, env, desc, ONLY_FIRST); +} + +/* Vector Iota Instruction */ +#define GEN_VEXT_VIOTA_M(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \ + uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vm = 
vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t sum = 0; \ + int i; \ + \ + for (i = 0; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + *((ETYPE *)vd + H(i)) = sum; \ + if (vext_elem_mask(vs2, mlen, i)) { \ + sum++; \ + } \ + } \ + CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ +} + +GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1, clearb) +GEN_VEXT_VIOTA_M(viota_m_h, uint16_t, H2, clearh) +GEN_VEXT_VIOTA_M(viota_m_w, uint32_t, H4, clearl) +GEN_VEXT_VIOTA_M(viota_m_d, uint64_t, H8, clearq) + +/* Vector Element Index Instruction */ +#define GEN_VEXT_VID_V(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + int i; \ + \ + for (i = 0; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + *((ETYPE *)vd + H(i)) = i; \ + } \ + CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ +} + +GEN_VEXT_VID_V(vid_v_b, uint8_t, H1, clearb) +GEN_VEXT_VID_V(vid_v_h, uint16_t, H2, clearh) +GEN_VEXT_VID_V(vid_v_w, uint32_t, H4, clearl) +GEN_VEXT_VID_V(vid_v_d, uint64_t, H8, clearq) + +/* + *** Vector Permutation Instructions + */ + +/* Vector Slide Instructions */ +#define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + target_ulong offset = s1, i; \ + \ + for (i = offset; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \ + } \ + CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ +} + +/* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */ +GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b, uint8_t, H1, clearb) +GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h, uint16_t, H2, clearh) +GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w, uint32_t, H4, clearl) +GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8, clearq) + +#define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + target_ulong offset = s1, i; \ + \ + for (i = 0; i < vl; ++i) { \ + target_ulong j = i + offset; \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + *((ETYPE *)vd + H(i)) = j >= vlmax ? 
0 : *((ETYPE *)vs2 + H(j)); \ + } \ + CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ +} + +/* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */ +GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b, uint8_t, H1, clearb) +GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2, clearh) +GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4, clearl) +GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8, clearq) + +#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + if (i == 0) { \ + *((ETYPE *)vd + H(i)) = s1; \ + } else { \ + *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1)); \ + } \ + } \ + CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ +} + +/* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */ +GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t, H1, clearb) +GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2, clearh) +GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4, clearl) +GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8, clearq) + +#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t i; \ + \ + for (i = 0; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + if (i == vl - 1) { \ + *((ETYPE *)vd + H(i)) = s1; \ + } else { \ + *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1)); \ + } \ + } \ + CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ +} + +/* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */ +GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1, clearb) +GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2, clearh) +GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4, clearl) +GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8, clearq) + +/* Vector Register Gather Instruction */ +#define GEN_VEXT_VRGATHER_VV(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t index, i; \ + \ + for (i = 0; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + index = *((ETYPE *)vs1 + H(i)); \ + if (index >= vlmax) { \ + *((ETYPE *)vd + H(i)) = 0; \ + } else { \ + *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \ + } \ + } \ + CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ +} + +/* vd[i] = (vs1[i] >= VLMAX) ? 
0 : vs2[vs1[i]]; */ +GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t, H1, clearb) +GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, H2, clearh) +GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, H4, clearl) +GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8, clearq) + +#define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vm = vext_vm(desc); \ + uint32_t vl = env->vl; \ + uint32_t index = s1, i; \ + \ + for (i = 0; i < vl; i++) { \ + if (!vm && !vext_elem_mask(v0, mlen, i)) { \ + continue; \ + } \ + if (index >= vlmax) { \ + *((ETYPE *)vd + H(i)) = 0; \ + } else { \ + *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \ + } \ + } \ + CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ +} + +/* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */ +GEN_VEXT_VRGATHER_VX(vrgather_vx_b, uint8_t, H1, clearb) +GEN_VEXT_VRGATHER_VX(vrgather_vx_h, uint16_t, H2, clearh) +GEN_VEXT_VRGATHER_VX(vrgather_vx_w, uint32_t, H4, clearl) +GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8, clearq) + +/* Vector Compress Instruction */ +#define GEN_VEXT_VCOMPRESS_VM(NAME, ETYPE, H, CLEAR_FN) \ +void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + uint32_t mlen = vext_mlen(desc); \ + uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \ + uint32_t vl = env->vl; \ + uint32_t num = 0, i; \ + \ + for (i = 0; i < vl; i++) { \ + if (!vext_elem_mask(vs1, mlen, i)) { \ + continue; \ + } \ + *((ETYPE *)vd + H(num)) = *((ETYPE *)vs2 + H(i)); \ + num++; \ + } \ + CLEAR_FN(vd, num, num * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \ +} + +/* Compress into vd elements of vs2 where vs1 is enabled */ +GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b, uint8_t, H1, clearb) +GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h, uint16_t, H2, clearh) +GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w, uint32_t, H4, clearl) +GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d, uint64_t, H8, clearq) diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def index 91ddaed..d79ae9e 100644 --- a/target/s390x/insn-data.def +++ b/target/s390x/insn-data.def @@ -798,7 +798,7 @@ /* SQUARE ROOT */ F(0xb314, SQEBR, RRE, Z, 0, e2, new, e1, sqeb, 0, IF_BFP) F(0xb315, SQDBR, RRE, Z, 0, f2, new, f1, sqdb, 0, IF_BFP) - F(0xb316, SQXBR, RRE, Z, x2h, x2l, new, x1, sqxb, 0, IF_BFP) + F(0xb316, SQXBR, RRE, Z, x2h, x2l, new_P, x1, sqxb, 0, IF_BFP) F(0xed14, SQEB, RXE, Z, 0, m2_32u, new, e1, sqeb, 0, IF_BFP) F(0xed15, SQDB, RXE, Z, 0, m2_64, new, f1, sqdb, 0, IF_BFP) |
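
The vector helpers added above are almost entirely macro-generated, which can obscure the underlying loops. The standalone C sketches below restate a few of the key patterns using plain arrays in place of QEMU's vector register state; the function and variable names are invented for illustration, these are not the helpers the patch installs, and the tail clearing the real helpers perform via CLEAR_FN (zeroing elements from vl up to vlmax) is omitted throughout.

First, the masked reduction loop that GEN_VEXT_RED and GEN_VEXT_FRED expand to: vs1[0] seeds the accumulator, inactive elements are skipped rather than zeroed, and the scalar result is written back to vd[0].

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Masked integer sum reduction in the style of vredsum.vs (illustrative only). */
static uint32_t reduce_add_masked_u32(uint32_t seed, const uint32_t *vs2,
                                      const bool *v0, bool vm, uint32_t vl)
{
    uint32_t acc = seed;                /* vs1[0] provides the initial value */

    for (uint32_t i = 0; i < vl; i++) {
        if (!vm && !v0[i]) {
            continue;                   /* inactive element: skipped, not zeroed */
        }
        acc += vs2[i];
    }
    return acc;                         /* result lands in vd[0] */
}

int main(void)
{
    uint32_t vs2[4] = { 1, 2, 3, 4 };
    bool v0[4] = { true, false, true, true };

    /* Masked: 10 + 1 + 3 + 4 = 18; unmasked (vm = 1): 10 + 1 + 2 + 3 + 4 = 20. */
    printf("%u %u\n", reduce_add_masked_u32(10, vs2, v0, false, 4),
           reduce_add_masked_u32(10, vs2, v0, true, 4));
    return 0;
}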
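
vmsbf.m, vmsif.m and vmsof.m all share vmsetm() above and differ only in how the first set bit of the source mask and the bits before it are treated ("set before first", "set including first", "set only first"). A compact restatement of that logic, again on plain bool arrays with invented names:

#include <stdbool.h>
#include <stdint.h>

enum set_mask_kind { SET_ONLY_FIRST, SET_INCLUDE_FIRST, SET_BEFORE_FIRST };

/*
 * Active elements before the first set bit of vs2 become 1, everything after
 * it becomes 0, and the element at the first set bit depends on 'kind'.
 * Masked-off elements are skipped entirely, as in the helper above.
 */
static void mask_set_first(bool *vd, const bool *v0, const bool *vs2,
                           bool vm, uint32_t vl, enum set_mask_kind kind)
{
    bool seen_first = false;

    for (uint32_t i = 0; i < vl; i++) {
        if (!vm && !v0[i]) {
            continue;
        }
        if (seen_first) {
            vd[i] = false;                      /* zero after the first set bit */
        } else if (vs2[i]) {
            seen_first = true;
            vd[i] = (kind != SET_BEFORE_FIRST); /* vmsbf clears it, vmsif/vmsof set it */
        } else {
            vd[i] = (kind != SET_ONLY_FIRST);   /* vmsof clears leading zeros, vmsbf/vmsif set them */
        }
    }
}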
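
viota.m is effectively a masked exclusive prefix count: each active destination element receives the number of set source-mask bits among the active elements strictly before it. A plain-array sketch (illustrative only):

#include <stdbool.h>
#include <stdint.h>

/* Exclusive running count of set bits in vs2, written to active elements of vd. */
static void iota_u32(uint32_t *vd, const bool *v0, const bool *vs2,
                     bool vm, uint32_t vl)
{
    uint32_t sum = 0;

    for (uint32_t i = 0; i < vl; i++) {
        if (!vm && !v0[i]) {
            continue;                   /* inactive: neither written nor counted */
        }
        vd[i] = sum;                    /* set bits seen so far */
        if (vs2[i]) {
            sum++;
        }
    }
}

With vs2 = {1, 1, 0, 1} and all elements active, vd becomes {0, 1, 2, 2}.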
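
Finally, vrgather.vv above zero-fills any destination element whose gather index is out of range instead of faulting, and vslidedown.vx applies the same rule to elements sourced beyond vlmax. A minimal sketch of the gather semantics (invented name, illustrative only):

#include <stdbool.h>
#include <stdint.h>

/* vd[i] = (vs1[i] >= vlmax) ? 0 : vs2[vs1[i]], for active elements only. */
static void rgather_u8(uint8_t *vd, const bool *v0, const uint8_t *vs1,
                       const uint8_t *vs2, bool vm, uint32_t vl, uint32_t vlmax)
{
    for (uint32_t i = 0; i < vl; i++) {
        if (!vm && !v0[i]) {
            continue;                   /* inactive destination element left untouched */
        }
        uint8_t index = vs1[i];
        vd[i] = (index >= vlmax) ? 0 : vs2[index];
    }
}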