-rw-r--r--  hw/intc/loongarch_ipi.c                            2
-rw-r--r--  linux-user/loongarch64/signal.c                    4
-rw-r--r--  target/loongarch/cpu.c                             5
-rw-r--r--  target/loongarch/cpu.h                            27
-rw-r--r--  target/loongarch/disas.c                         911
-rw-r--r--  target/loongarch/fpu_helper.c                      2
-rw-r--r--  target/loongarch/gdbstub.c                         4
-rw-r--r--  target/loongarch/helper.h                        566
-rw-r--r--  target/loongarch/insn_trans/trans_farith.c.inc    72
-rw-r--r--  target/loongarch/insn_trans/trans_fcmp.c.inc      12
-rw-r--r--  target/loongarch/insn_trans/trans_fmemory.c.inc   37
-rw-r--r--  target/loongarch/insn_trans/trans_fmov.c.inc      31
-rw-r--r--  target/loongarch/insn_trans/trans_lsx.c.inc     4400
-rw-r--r--  target/loongarch/insns.decode                    811
-rw-r--r--  target/loongarch/internals.h                      23
-rw-r--r--  target/loongarch/lsx_helper.c                   3004
-rw-r--r--  target/loongarch/machine.c                        79
-rw-r--r--  target/loongarch/meson.build                       1
-rw-r--r--  target/loongarch/translate.c                      55
-rw-r--r--  target/loongarch/translate.h                       1
20 files changed, 9989 insertions, 58 deletions
diff --git a/hw/intc/loongarch_ipi.c b/hw/intc/loongarch_ipi.c
index aa4bf9e..bdba0f8 100644
--- a/hw/intc/loongarch_ipi.c
+++ b/hw/intc/loongarch_ipi.c
@@ -50,7 +50,7 @@ static uint64_t loongarch_ipi_readl(void *opaque, hwaddr addr, unsigned size)
return ret;
}
-static void send_ipi_data(CPULoongArchState *env, target_ulong val, target_ulong addr)
+static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr)
{
int i, mask = 0, data = 0;
diff --git a/linux-user/loongarch64/signal.c b/linux-user/loongarch64/signal.c
index 7c7afb6..bb8efb1 100644
--- a/linux-user/loongarch64/signal.c
+++ b/linux-user/loongarch64/signal.c
@@ -128,7 +128,7 @@ static void setup_sigframe(CPULoongArchState *env,
fpu_ctx = (struct target_fpu_context *)(info + 1);
for (i = 0; i < 32; ++i) {
- __put_user(env->fpr[i], &fpu_ctx->regs[i]);
+ __put_user(env->fpr[i].vreg.D(0), &fpu_ctx->regs[i]);
}
__put_user(read_fcc(env), &fpu_ctx->fcc);
__put_user(env->fcsr0, &fpu_ctx->fcsr);
@@ -193,7 +193,7 @@ static void restore_sigframe(CPULoongArchState *env,
uint64_t fcc;
for (i = 0; i < 32; ++i) {
- __get_user(env->fpr[i], &fpu_ctx->regs[i]);
+ __get_user(env->fpr[i].vreg.D(0), &fpu_ctx->regs[i]);
}
__get_user(fcc, &fpu_ctx->fcc);
write_fcc(env, fcc);
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index 97e6579..c0afc21 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -52,6 +52,7 @@ static const char * const excp_names[] = {
[EXCCODE_FPE] = "Floating Point Exception",
[EXCCODE_DBP] = "Debug breakpoint",
[EXCCODE_BCE] = "Bound Check Exception",
+ [EXCCODE_SXD] = "128 bit vector instructions Disable exception",
};
const char *loongarch_exception_name(int32_t exception)
@@ -187,6 +188,7 @@ static void loongarch_cpu_do_interrupt(CPUState *cs)
case EXCCODE_FPD:
case EXCCODE_FPE:
case EXCCODE_BCE:
+ case EXCCODE_SXD:
env->CSR_BADV = env->pc;
QEMU_FALLTHROUGH;
case EXCCODE_ADEM:
@@ -386,6 +388,7 @@ static void loongarch_la464_initfn(Object *obj)
data = FIELD_DP32(data, CPUCFG2, FP_SP, 1);
data = FIELD_DP32(data, CPUCFG2, FP_DP, 1);
data = FIELD_DP32(data, CPUCFG2, FP_VER, 1);
+ data = FIELD_DP32(data, CPUCFG2, LSX, 1);
data = FIELD_DP32(data, CPUCFG2, LLFTP, 1);
data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1);
data = FIELD_DP32(data, CPUCFG2, LAM, 1);
@@ -656,7 +659,7 @@ void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
/* fpr */
if (flags & CPU_DUMP_FPU) {
for (i = 0; i < 32; i++) {
- qemu_fprintf(f, " %s %016" PRIx64, fregnames[i], env->fpr[i]);
+ qemu_fprintf(f, " %s %016" PRIx64, fregnames[i], env->fpr[i].vreg.D(0));
if ((i & 3) == 3) {
qemu_fprintf(f, "\n");
}
diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h
index e11c875..1f37e36 100644
--- a/target/loongarch/cpu.h
+++ b/target/loongarch/cpu.h
@@ -8,6 +8,7 @@
#ifndef LOONGARCH_CPU_H
#define LOONGARCH_CPU_H
+#include "qemu/int128.h"
#include "exec/cpu-defs.h"
#include "fpu/softfloat-types.h"
#include "hw/registerfields.h"
@@ -54,6 +55,10 @@ FIELD(FCSR0, CAUSE, 24, 5)
do { \
(REG) = FIELD_DP32(REG, FCSR0, CAUSE, V); \
} while (0)
+#define UPDATE_FP_CAUSE(REG, V) \
+ do { \
+ (REG) |= FIELD_DP32(0, FCSR0, CAUSE, V); \
+ } while (0)
#define GET_FP_ENABLES(REG) FIELD_EX32(REG, FCSR0, ENABLES)
#define SET_FP_ENABLES(REG, V) \
@@ -241,6 +246,24 @@ FIELD(TLB_MISC, ASID, 1, 10)
FIELD(TLB_MISC, VPPN, 13, 35)
FIELD(TLB_MISC, PS, 48, 6)
+#define LSX_LEN (128)
+typedef union VReg {
+ int8_t B[LSX_LEN / 8];
+ int16_t H[LSX_LEN / 16];
+ int32_t W[LSX_LEN / 32];
+ int64_t D[LSX_LEN / 64];
+ uint8_t UB[LSX_LEN / 8];
+ uint16_t UH[LSX_LEN / 16];
+ uint32_t UW[LSX_LEN / 32];
+ uint64_t UD[LSX_LEN / 64];
+ Int128 Q[LSX_LEN / 128];
+} VReg;
+
+typedef union fpr_t fpr_t;
+union fpr_t {
+ VReg vreg;
+};
+
struct LoongArchTLB {
uint64_t tlb_misc;
/* Fields corresponding to CSR_TLBELO0/1 */
@@ -253,7 +276,7 @@ typedef struct CPUArchState {
uint64_t gpr[32];
uint64_t pc;
- uint64_t fpr[32];
+ fpr_t fpr[32];
float_status fp_status;
bool cf[8];
@@ -400,6 +423,7 @@ static inline int cpu_mmu_index(CPULoongArchState *env, bool ifetch)
#define HW_FLAGS_PLV_MASK R_CSR_CRMD_PLV_MASK /* 0x03 */
#define HW_FLAGS_CRMD_PG R_CSR_CRMD_PG_MASK /* 0x10 */
#define HW_FLAGS_EUEN_FPE 0x04
+#define HW_FLAGS_EUEN_SXE 0x08
static inline void cpu_get_tb_cpu_state(CPULoongArchState *env,
target_ulong *pc,
@@ -410,6 +434,7 @@ static inline void cpu_get_tb_cpu_state(CPULoongArchState *env,
*cs_base = 0;
*flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK);
*flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE;
+ *flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE;
}
void loongarch_cpu_list(void);
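
The .vreg.D(0) accessor used in signal.c, cpu.c and gdbstub.c above is an element-access macro rather than a plain union member: D(0) selects the low 64-bit lane of the vector register, which continues to hold the scalar FP value. A minimal sketch of how such macros are presumably defined elsewhere in the patch is shown below; the exact form, including the host-endianness handling, is an assumption, not a quote from the patch.

/* Sketch only -- the real accessor macros live in another hunk of the patch. */
#if HOST_BIG_ENDIAN
#define B(x)  B[15 - (x)]
#define H(x)  H[7 - (x)]
#define W(x)  W[3 - (x)]
#define D(x)  D[1 - (x)]
#else
#define B(x)  B[x]
#define H(x)  H[x]
#define W(x)  W[x]
#define D(x)  D[x]
#endif

With a mapping like this, D(0) is the low 64 bits of the 128-bit register on either host endianness, so the existing scalar FPR accesses keep their previous behaviour.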
diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c
index 2e93e77..5c402d9 100644
--- a/target/loongarch/disas.c
+++ b/target/loongarch/disas.c
@@ -21,11 +21,21 @@ static inline int plus_1(DisasContext *ctx, int x)
return x + 1;
}
+static inline int shl_1(DisasContext *ctx, int x)
+{
+ return x << 1;
+}
+
static inline int shl_2(DisasContext *ctx, int x)
{
return x << 2;
}
+static inline int shl_3(DisasContext *ctx, int x)
+{
+ return x << 3;
+}
+
#define CSR_NAME(REG) \
[LOONGARCH_CSR_##REG] = (#REG)
@@ -784,3 +794,904 @@ PCADD_INSN(pcaddi)
PCADD_INSN(pcalau12i)
PCADD_INSN(pcaddu12i)
PCADD_INSN(pcaddu18i)
+
+#define INSN_LSX(insn, type) \
+static bool trans_##insn(DisasContext *ctx, arg_##type * a) \
+{ \
+ output_##type(ctx, a, #insn); \
+ return true; \
+}
+
+static void output_cv(DisasContext *ctx, arg_cv *a,
+ const char *mnemonic)
+{
+ output(ctx, mnemonic, "fcc%d, v%d", a->cd, a->vj);
+}
+
+static void output_vvv(DisasContext *ctx, arg_vvv *a, const char *mnemonic)
+{
+ output(ctx, mnemonic, "v%d, v%d, v%d", a->vd, a->vj, a->vk);
+}
+
+static void output_vv_i(DisasContext *ctx, arg_vv_i *a, const char *mnemonic)
+{
+ output(ctx, mnemonic, "v%d, v%d, 0x%x", a->vd, a->vj, a->imm);
+}
+
+static void output_vv(DisasContext *ctx, arg_vv *a, const char *mnemonic)
+{
+ output(ctx, mnemonic, "v%d, v%d", a->vd, a->vj);
+}
+
+static void output_vvvv(DisasContext *ctx, arg_vvvv *a, const char *mnemonic)
+{
+ output(ctx, mnemonic, "v%d, v%d, v%d, v%d", a->vd, a->vj, a->vk, a->va);
+}
+
+static void output_vr_i(DisasContext *ctx, arg_vr_i *a, const char *mnemonic)
+{
+ output(ctx, mnemonic, "v%d, r%d, 0x%x", a->vd, a->rj, a->imm);
+}
+
+static void output_vr_ii(DisasContext *ctx, arg_vr_ii *a, const char *mnemonic)
+{
+ output(ctx, mnemonic, "v%d, r%d, 0x%x, 0x%x", a->vd, a->rj, a->imm, a->imm2);
+}
+
+static void output_rv_i(DisasContext *ctx, arg_rv_i *a, const char *mnemonic)
+{
+ output(ctx, mnemonic, "r%d, v%d, 0x%x", a->rd, a->vj, a->imm);
+}
+
+static void output_vr(DisasContext *ctx, arg_vr *a, const char *mnemonic)
+{
+ output(ctx, mnemonic, "v%d, r%d", a->vd, a->rj);
+}
+
+static void output_vvr(DisasContext *ctx, arg_vvr *a, const char *mnemonic)
+{
+ output(ctx, mnemonic, "v%d, v%d, r%d", a->vd, a->vj, a->rk);
+}
+
+static void output_vrr(DisasContext *ctx, arg_vrr *a, const char *mnemonic)
+{
+ output(ctx, mnemonic, "v%d, r%d, r%d", a->vd, a->rj, a->rk);
+}
+
+static void output_v_i(DisasContext *ctx, arg_v_i *a, const char *mnemonic)
+{
+ output(ctx, mnemonic, "v%d, 0x%x", a->vd, a->imm);
+}
+
+INSN_LSX(vadd_b, vvv)
+INSN_LSX(vadd_h, vvv)
+INSN_LSX(vadd_w, vvv)
+INSN_LSX(vadd_d, vvv)
+INSN_LSX(vadd_q, vvv)
+INSN_LSX(vsub_b, vvv)
+INSN_LSX(vsub_h, vvv)
+INSN_LSX(vsub_w, vvv)
+INSN_LSX(vsub_d, vvv)
+INSN_LSX(vsub_q, vvv)
+
+INSN_LSX(vaddi_bu, vv_i)
+INSN_LSX(vaddi_hu, vv_i)
+INSN_LSX(vaddi_wu, vv_i)
+INSN_LSX(vaddi_du, vv_i)
+INSN_LSX(vsubi_bu, vv_i)
+INSN_LSX(vsubi_hu, vv_i)
+INSN_LSX(vsubi_wu, vv_i)
+INSN_LSX(vsubi_du, vv_i)
+
+INSN_LSX(vneg_b, vv)
+INSN_LSX(vneg_h, vv)
+INSN_LSX(vneg_w, vv)
+INSN_LSX(vneg_d, vv)
+
+INSN_LSX(vsadd_b, vvv)
+INSN_LSX(vsadd_h, vvv)
+INSN_LSX(vsadd_w, vvv)
+INSN_LSX(vsadd_d, vvv)
+INSN_LSX(vsadd_bu, vvv)
+INSN_LSX(vsadd_hu, vvv)
+INSN_LSX(vsadd_wu, vvv)
+INSN_LSX(vsadd_du, vvv)
+INSN_LSX(vssub_b, vvv)
+INSN_LSX(vssub_h, vvv)
+INSN_LSX(vssub_w, vvv)
+INSN_LSX(vssub_d, vvv)
+INSN_LSX(vssub_bu, vvv)
+INSN_LSX(vssub_hu, vvv)
+INSN_LSX(vssub_wu, vvv)
+INSN_LSX(vssub_du, vvv)
+
+INSN_LSX(vhaddw_h_b, vvv)
+INSN_LSX(vhaddw_w_h, vvv)
+INSN_LSX(vhaddw_d_w, vvv)
+INSN_LSX(vhaddw_q_d, vvv)
+INSN_LSX(vhaddw_hu_bu, vvv)
+INSN_LSX(vhaddw_wu_hu, vvv)
+INSN_LSX(vhaddw_du_wu, vvv)
+INSN_LSX(vhaddw_qu_du, vvv)
+INSN_LSX(vhsubw_h_b, vvv)
+INSN_LSX(vhsubw_w_h, vvv)
+INSN_LSX(vhsubw_d_w, vvv)
+INSN_LSX(vhsubw_q_d, vvv)
+INSN_LSX(vhsubw_hu_bu, vvv)
+INSN_LSX(vhsubw_wu_hu, vvv)
+INSN_LSX(vhsubw_du_wu, vvv)
+INSN_LSX(vhsubw_qu_du, vvv)
+
+INSN_LSX(vaddwev_h_b, vvv)
+INSN_LSX(vaddwev_w_h, vvv)
+INSN_LSX(vaddwev_d_w, vvv)
+INSN_LSX(vaddwev_q_d, vvv)
+INSN_LSX(vaddwod_h_b, vvv)
+INSN_LSX(vaddwod_w_h, vvv)
+INSN_LSX(vaddwod_d_w, vvv)
+INSN_LSX(vaddwod_q_d, vvv)
+INSN_LSX(vsubwev_h_b, vvv)
+INSN_LSX(vsubwev_w_h, vvv)
+INSN_LSX(vsubwev_d_w, vvv)
+INSN_LSX(vsubwev_q_d, vvv)
+INSN_LSX(vsubwod_h_b, vvv)
+INSN_LSX(vsubwod_w_h, vvv)
+INSN_LSX(vsubwod_d_w, vvv)
+INSN_LSX(vsubwod_q_d, vvv)
+
+INSN_LSX(vaddwev_h_bu, vvv)
+INSN_LSX(vaddwev_w_hu, vvv)
+INSN_LSX(vaddwev_d_wu, vvv)
+INSN_LSX(vaddwev_q_du, vvv)
+INSN_LSX(vaddwod_h_bu, vvv)
+INSN_LSX(vaddwod_w_hu, vvv)
+INSN_LSX(vaddwod_d_wu, vvv)
+INSN_LSX(vaddwod_q_du, vvv)
+INSN_LSX(vsubwev_h_bu, vvv)
+INSN_LSX(vsubwev_w_hu, vvv)
+INSN_LSX(vsubwev_d_wu, vvv)
+INSN_LSX(vsubwev_q_du, vvv)
+INSN_LSX(vsubwod_h_bu, vvv)
+INSN_LSX(vsubwod_w_hu, vvv)
+INSN_LSX(vsubwod_d_wu, vvv)
+INSN_LSX(vsubwod_q_du, vvv)
+
+INSN_LSX(vaddwev_h_bu_b, vvv)
+INSN_LSX(vaddwev_w_hu_h, vvv)
+INSN_LSX(vaddwev_d_wu_w, vvv)
+INSN_LSX(vaddwev_q_du_d, vvv)
+INSN_LSX(vaddwod_h_bu_b, vvv)
+INSN_LSX(vaddwod_w_hu_h, vvv)
+INSN_LSX(vaddwod_d_wu_w, vvv)
+INSN_LSX(vaddwod_q_du_d, vvv)
+
+INSN_LSX(vavg_b, vvv)
+INSN_LSX(vavg_h, vvv)
+INSN_LSX(vavg_w, vvv)
+INSN_LSX(vavg_d, vvv)
+INSN_LSX(vavg_bu, vvv)
+INSN_LSX(vavg_hu, vvv)
+INSN_LSX(vavg_wu, vvv)
+INSN_LSX(vavg_du, vvv)
+INSN_LSX(vavgr_b, vvv)
+INSN_LSX(vavgr_h, vvv)
+INSN_LSX(vavgr_w, vvv)
+INSN_LSX(vavgr_d, vvv)
+INSN_LSX(vavgr_bu, vvv)
+INSN_LSX(vavgr_hu, vvv)
+INSN_LSX(vavgr_wu, vvv)
+INSN_LSX(vavgr_du, vvv)
+
+INSN_LSX(vabsd_b, vvv)
+INSN_LSX(vabsd_h, vvv)
+INSN_LSX(vabsd_w, vvv)
+INSN_LSX(vabsd_d, vvv)
+INSN_LSX(vabsd_bu, vvv)
+INSN_LSX(vabsd_hu, vvv)
+INSN_LSX(vabsd_wu, vvv)
+INSN_LSX(vabsd_du, vvv)
+
+INSN_LSX(vadda_b, vvv)
+INSN_LSX(vadda_h, vvv)
+INSN_LSX(vadda_w, vvv)
+INSN_LSX(vadda_d, vvv)
+
+INSN_LSX(vmax_b, vvv)
+INSN_LSX(vmax_h, vvv)
+INSN_LSX(vmax_w, vvv)
+INSN_LSX(vmax_d, vvv)
+INSN_LSX(vmin_b, vvv)
+INSN_LSX(vmin_h, vvv)
+INSN_LSX(vmin_w, vvv)
+INSN_LSX(vmin_d, vvv)
+INSN_LSX(vmax_bu, vvv)
+INSN_LSX(vmax_hu, vvv)
+INSN_LSX(vmax_wu, vvv)
+INSN_LSX(vmax_du, vvv)
+INSN_LSX(vmin_bu, vvv)
+INSN_LSX(vmin_hu, vvv)
+INSN_LSX(vmin_wu, vvv)
+INSN_LSX(vmin_du, vvv)
+INSN_LSX(vmaxi_b, vv_i)
+INSN_LSX(vmaxi_h, vv_i)
+INSN_LSX(vmaxi_w, vv_i)
+INSN_LSX(vmaxi_d, vv_i)
+INSN_LSX(vmini_b, vv_i)
+INSN_LSX(vmini_h, vv_i)
+INSN_LSX(vmini_w, vv_i)
+INSN_LSX(vmini_d, vv_i)
+INSN_LSX(vmaxi_bu, vv_i)
+INSN_LSX(vmaxi_hu, vv_i)
+INSN_LSX(vmaxi_wu, vv_i)
+INSN_LSX(vmaxi_du, vv_i)
+INSN_LSX(vmini_bu, vv_i)
+INSN_LSX(vmini_hu, vv_i)
+INSN_LSX(vmini_wu, vv_i)
+INSN_LSX(vmini_du, vv_i)
+
+INSN_LSX(vmul_b, vvv)
+INSN_LSX(vmul_h, vvv)
+INSN_LSX(vmul_w, vvv)
+INSN_LSX(vmul_d, vvv)
+INSN_LSX(vmuh_b, vvv)
+INSN_LSX(vmuh_h, vvv)
+INSN_LSX(vmuh_w, vvv)
+INSN_LSX(vmuh_d, vvv)
+INSN_LSX(vmuh_bu, vvv)
+INSN_LSX(vmuh_hu, vvv)
+INSN_LSX(vmuh_wu, vvv)
+INSN_LSX(vmuh_du, vvv)
+
+INSN_LSX(vmulwev_h_b, vvv)
+INSN_LSX(vmulwev_w_h, vvv)
+INSN_LSX(vmulwev_d_w, vvv)
+INSN_LSX(vmulwev_q_d, vvv)
+INSN_LSX(vmulwod_h_b, vvv)
+INSN_LSX(vmulwod_w_h, vvv)
+INSN_LSX(vmulwod_d_w, vvv)
+INSN_LSX(vmulwod_q_d, vvv)
+INSN_LSX(vmulwev_h_bu, vvv)
+INSN_LSX(vmulwev_w_hu, vvv)
+INSN_LSX(vmulwev_d_wu, vvv)
+INSN_LSX(vmulwev_q_du, vvv)
+INSN_LSX(vmulwod_h_bu, vvv)
+INSN_LSX(vmulwod_w_hu, vvv)
+INSN_LSX(vmulwod_d_wu, vvv)
+INSN_LSX(vmulwod_q_du, vvv)
+INSN_LSX(vmulwev_h_bu_b, vvv)
+INSN_LSX(vmulwev_w_hu_h, vvv)
+INSN_LSX(vmulwev_d_wu_w, vvv)
+INSN_LSX(vmulwev_q_du_d, vvv)
+INSN_LSX(vmulwod_h_bu_b, vvv)
+INSN_LSX(vmulwod_w_hu_h, vvv)
+INSN_LSX(vmulwod_d_wu_w, vvv)
+INSN_LSX(vmulwod_q_du_d, vvv)
+
+INSN_LSX(vmadd_b, vvv)
+INSN_LSX(vmadd_h, vvv)
+INSN_LSX(vmadd_w, vvv)
+INSN_LSX(vmadd_d, vvv)
+INSN_LSX(vmsub_b, vvv)
+INSN_LSX(vmsub_h, vvv)
+INSN_LSX(vmsub_w, vvv)
+INSN_LSX(vmsub_d, vvv)
+
+INSN_LSX(vmaddwev_h_b, vvv)
+INSN_LSX(vmaddwev_w_h, vvv)
+INSN_LSX(vmaddwev_d_w, vvv)
+INSN_LSX(vmaddwev_q_d, vvv)
+INSN_LSX(vmaddwod_h_b, vvv)
+INSN_LSX(vmaddwod_w_h, vvv)
+INSN_LSX(vmaddwod_d_w, vvv)
+INSN_LSX(vmaddwod_q_d, vvv)
+INSN_LSX(vmaddwev_h_bu, vvv)
+INSN_LSX(vmaddwev_w_hu, vvv)
+INSN_LSX(vmaddwev_d_wu, vvv)
+INSN_LSX(vmaddwev_q_du, vvv)
+INSN_LSX(vmaddwod_h_bu, vvv)
+INSN_LSX(vmaddwod_w_hu, vvv)
+INSN_LSX(vmaddwod_d_wu, vvv)
+INSN_LSX(vmaddwod_q_du, vvv)
+INSN_LSX(vmaddwev_h_bu_b, vvv)
+INSN_LSX(vmaddwev_w_hu_h, vvv)
+INSN_LSX(vmaddwev_d_wu_w, vvv)
+INSN_LSX(vmaddwev_q_du_d, vvv)
+INSN_LSX(vmaddwod_h_bu_b, vvv)
+INSN_LSX(vmaddwod_w_hu_h, vvv)
+INSN_LSX(vmaddwod_d_wu_w, vvv)
+INSN_LSX(vmaddwod_q_du_d, vvv)
+
+INSN_LSX(vdiv_b, vvv)
+INSN_LSX(vdiv_h, vvv)
+INSN_LSX(vdiv_w, vvv)
+INSN_LSX(vdiv_d, vvv)
+INSN_LSX(vdiv_bu, vvv)
+INSN_LSX(vdiv_hu, vvv)
+INSN_LSX(vdiv_wu, vvv)
+INSN_LSX(vdiv_du, vvv)
+INSN_LSX(vmod_b, vvv)
+INSN_LSX(vmod_h, vvv)
+INSN_LSX(vmod_w, vvv)
+INSN_LSX(vmod_d, vvv)
+INSN_LSX(vmod_bu, vvv)
+INSN_LSX(vmod_hu, vvv)
+INSN_LSX(vmod_wu, vvv)
+INSN_LSX(vmod_du, vvv)
+
+INSN_LSX(vsat_b, vv_i)
+INSN_LSX(vsat_h, vv_i)
+INSN_LSX(vsat_w, vv_i)
+INSN_LSX(vsat_d, vv_i)
+INSN_LSX(vsat_bu, vv_i)
+INSN_LSX(vsat_hu, vv_i)
+INSN_LSX(vsat_wu, vv_i)
+INSN_LSX(vsat_du, vv_i)
+
+INSN_LSX(vexth_h_b, vv)
+INSN_LSX(vexth_w_h, vv)
+INSN_LSX(vexth_d_w, vv)
+INSN_LSX(vexth_q_d, vv)
+INSN_LSX(vexth_hu_bu, vv)
+INSN_LSX(vexth_wu_hu, vv)
+INSN_LSX(vexth_du_wu, vv)
+INSN_LSX(vexth_qu_du, vv)
+
+INSN_LSX(vsigncov_b, vvv)
+INSN_LSX(vsigncov_h, vvv)
+INSN_LSX(vsigncov_w, vvv)
+INSN_LSX(vsigncov_d, vvv)
+
+INSN_LSX(vmskltz_b, vv)
+INSN_LSX(vmskltz_h, vv)
+INSN_LSX(vmskltz_w, vv)
+INSN_LSX(vmskltz_d, vv)
+INSN_LSX(vmskgez_b, vv)
+INSN_LSX(vmsknz_b, vv)
+
+INSN_LSX(vldi, v_i)
+
+INSN_LSX(vand_v, vvv)
+INSN_LSX(vor_v, vvv)
+INSN_LSX(vxor_v, vvv)
+INSN_LSX(vnor_v, vvv)
+INSN_LSX(vandn_v, vvv)
+INSN_LSX(vorn_v, vvv)
+
+INSN_LSX(vandi_b, vv_i)
+INSN_LSX(vori_b, vv_i)
+INSN_LSX(vxori_b, vv_i)
+INSN_LSX(vnori_b, vv_i)
+
+INSN_LSX(vsll_b, vvv)
+INSN_LSX(vsll_h, vvv)
+INSN_LSX(vsll_w, vvv)
+INSN_LSX(vsll_d, vvv)
+INSN_LSX(vslli_b, vv_i)
+INSN_LSX(vslli_h, vv_i)
+INSN_LSX(vslli_w, vv_i)
+INSN_LSX(vslli_d, vv_i)
+
+INSN_LSX(vsrl_b, vvv)
+INSN_LSX(vsrl_h, vvv)
+INSN_LSX(vsrl_w, vvv)
+INSN_LSX(vsrl_d, vvv)
+INSN_LSX(vsrli_b, vv_i)
+INSN_LSX(vsrli_h, vv_i)
+INSN_LSX(vsrli_w, vv_i)
+INSN_LSX(vsrli_d, vv_i)
+
+INSN_LSX(vsra_b, vvv)
+INSN_LSX(vsra_h, vvv)
+INSN_LSX(vsra_w, vvv)
+INSN_LSX(vsra_d, vvv)
+INSN_LSX(vsrai_b, vv_i)
+INSN_LSX(vsrai_h, vv_i)
+INSN_LSX(vsrai_w, vv_i)
+INSN_LSX(vsrai_d, vv_i)
+
+INSN_LSX(vrotr_b, vvv)
+INSN_LSX(vrotr_h, vvv)
+INSN_LSX(vrotr_w, vvv)
+INSN_LSX(vrotr_d, vvv)
+INSN_LSX(vrotri_b, vv_i)
+INSN_LSX(vrotri_h, vv_i)
+INSN_LSX(vrotri_w, vv_i)
+INSN_LSX(vrotri_d, vv_i)
+
+INSN_LSX(vsllwil_h_b, vv_i)
+INSN_LSX(vsllwil_w_h, vv_i)
+INSN_LSX(vsllwil_d_w, vv_i)
+INSN_LSX(vextl_q_d, vv)
+INSN_LSX(vsllwil_hu_bu, vv_i)
+INSN_LSX(vsllwil_wu_hu, vv_i)
+INSN_LSX(vsllwil_du_wu, vv_i)
+INSN_LSX(vextl_qu_du, vv)
+
+INSN_LSX(vsrlr_b, vvv)
+INSN_LSX(vsrlr_h, vvv)
+INSN_LSX(vsrlr_w, vvv)
+INSN_LSX(vsrlr_d, vvv)
+INSN_LSX(vsrlri_b, vv_i)
+INSN_LSX(vsrlri_h, vv_i)
+INSN_LSX(vsrlri_w, vv_i)
+INSN_LSX(vsrlri_d, vv_i)
+
+INSN_LSX(vsrar_b, vvv)
+INSN_LSX(vsrar_h, vvv)
+INSN_LSX(vsrar_w, vvv)
+INSN_LSX(vsrar_d, vvv)
+INSN_LSX(vsrari_b, vv_i)
+INSN_LSX(vsrari_h, vv_i)
+INSN_LSX(vsrari_w, vv_i)
+INSN_LSX(vsrari_d, vv_i)
+
+INSN_LSX(vsrln_b_h, vvv)
+INSN_LSX(vsrln_h_w, vvv)
+INSN_LSX(vsrln_w_d, vvv)
+INSN_LSX(vsran_b_h, vvv)
+INSN_LSX(vsran_h_w, vvv)
+INSN_LSX(vsran_w_d, vvv)
+
+INSN_LSX(vsrlni_b_h, vv_i)
+INSN_LSX(vsrlni_h_w, vv_i)
+INSN_LSX(vsrlni_w_d, vv_i)
+INSN_LSX(vsrlni_d_q, vv_i)
+INSN_LSX(vsrani_b_h, vv_i)
+INSN_LSX(vsrani_h_w, vv_i)
+INSN_LSX(vsrani_w_d, vv_i)
+INSN_LSX(vsrani_d_q, vv_i)
+
+INSN_LSX(vsrlrn_b_h, vvv)
+INSN_LSX(vsrlrn_h_w, vvv)
+INSN_LSX(vsrlrn_w_d, vvv)
+INSN_LSX(vsrarn_b_h, vvv)
+INSN_LSX(vsrarn_h_w, vvv)
+INSN_LSX(vsrarn_w_d, vvv)
+
+INSN_LSX(vsrlrni_b_h, vv_i)
+INSN_LSX(vsrlrni_h_w, vv_i)
+INSN_LSX(vsrlrni_w_d, vv_i)
+INSN_LSX(vsrlrni_d_q, vv_i)
+INSN_LSX(vsrarni_b_h, vv_i)
+INSN_LSX(vsrarni_h_w, vv_i)
+INSN_LSX(vsrarni_w_d, vv_i)
+INSN_LSX(vsrarni_d_q, vv_i)
+
+INSN_LSX(vssrln_b_h, vvv)
+INSN_LSX(vssrln_h_w, vvv)
+INSN_LSX(vssrln_w_d, vvv)
+INSN_LSX(vssran_b_h, vvv)
+INSN_LSX(vssran_h_w, vvv)
+INSN_LSX(vssran_w_d, vvv)
+INSN_LSX(vssrln_bu_h, vvv)
+INSN_LSX(vssrln_hu_w, vvv)
+INSN_LSX(vssrln_wu_d, vvv)
+INSN_LSX(vssran_bu_h, vvv)
+INSN_LSX(vssran_hu_w, vvv)
+INSN_LSX(vssran_wu_d, vvv)
+
+INSN_LSX(vssrlni_b_h, vv_i)
+INSN_LSX(vssrlni_h_w, vv_i)
+INSN_LSX(vssrlni_w_d, vv_i)
+INSN_LSX(vssrlni_d_q, vv_i)
+INSN_LSX(vssrani_b_h, vv_i)
+INSN_LSX(vssrani_h_w, vv_i)
+INSN_LSX(vssrani_w_d, vv_i)
+INSN_LSX(vssrani_d_q, vv_i)
+INSN_LSX(vssrlni_bu_h, vv_i)
+INSN_LSX(vssrlni_hu_w, vv_i)
+INSN_LSX(vssrlni_wu_d, vv_i)
+INSN_LSX(vssrlni_du_q, vv_i)
+INSN_LSX(vssrani_bu_h, vv_i)
+INSN_LSX(vssrani_hu_w, vv_i)
+INSN_LSX(vssrani_wu_d, vv_i)
+INSN_LSX(vssrani_du_q, vv_i)
+
+INSN_LSX(vssrlrn_b_h, vvv)
+INSN_LSX(vssrlrn_h_w, vvv)
+INSN_LSX(vssrlrn_w_d, vvv)
+INSN_LSX(vssrarn_b_h, vvv)
+INSN_LSX(vssrarn_h_w, vvv)
+INSN_LSX(vssrarn_w_d, vvv)
+INSN_LSX(vssrlrn_bu_h, vvv)
+INSN_LSX(vssrlrn_hu_w, vvv)
+INSN_LSX(vssrlrn_wu_d, vvv)
+INSN_LSX(vssrarn_bu_h, vvv)
+INSN_LSX(vssrarn_hu_w, vvv)
+INSN_LSX(vssrarn_wu_d, vvv)
+
+INSN_LSX(vssrlrni_b_h, vv_i)
+INSN_LSX(vssrlrni_h_w, vv_i)
+INSN_LSX(vssrlrni_w_d, vv_i)
+INSN_LSX(vssrlrni_d_q, vv_i)
+INSN_LSX(vssrlrni_bu_h, vv_i)
+INSN_LSX(vssrlrni_hu_w, vv_i)
+INSN_LSX(vssrlrni_wu_d, vv_i)
+INSN_LSX(vssrlrni_du_q, vv_i)
+INSN_LSX(vssrarni_b_h, vv_i)
+INSN_LSX(vssrarni_h_w, vv_i)
+INSN_LSX(vssrarni_w_d, vv_i)
+INSN_LSX(vssrarni_d_q, vv_i)
+INSN_LSX(vssrarni_bu_h, vv_i)
+INSN_LSX(vssrarni_hu_w, vv_i)
+INSN_LSX(vssrarni_wu_d, vv_i)
+INSN_LSX(vssrarni_du_q, vv_i)
+
+INSN_LSX(vclo_b, vv)
+INSN_LSX(vclo_h, vv)
+INSN_LSX(vclo_w, vv)
+INSN_LSX(vclo_d, vv)
+INSN_LSX(vclz_b, vv)
+INSN_LSX(vclz_h, vv)
+INSN_LSX(vclz_w, vv)
+INSN_LSX(vclz_d, vv)
+
+INSN_LSX(vpcnt_b, vv)
+INSN_LSX(vpcnt_h, vv)
+INSN_LSX(vpcnt_w, vv)
+INSN_LSX(vpcnt_d, vv)
+
+INSN_LSX(vbitclr_b, vvv)
+INSN_LSX(vbitclr_h, vvv)
+INSN_LSX(vbitclr_w, vvv)
+INSN_LSX(vbitclr_d, vvv)
+INSN_LSX(vbitclri_b, vv_i)
+INSN_LSX(vbitclri_h, vv_i)
+INSN_LSX(vbitclri_w, vv_i)
+INSN_LSX(vbitclri_d, vv_i)
+INSN_LSX(vbitset_b, vvv)
+INSN_LSX(vbitset_h, vvv)
+INSN_LSX(vbitset_w, vvv)
+INSN_LSX(vbitset_d, vvv)
+INSN_LSX(vbitseti_b, vv_i)
+INSN_LSX(vbitseti_h, vv_i)
+INSN_LSX(vbitseti_w, vv_i)
+INSN_LSX(vbitseti_d, vv_i)
+INSN_LSX(vbitrev_b, vvv)
+INSN_LSX(vbitrev_h, vvv)
+INSN_LSX(vbitrev_w, vvv)
+INSN_LSX(vbitrev_d, vvv)
+INSN_LSX(vbitrevi_b, vv_i)
+INSN_LSX(vbitrevi_h, vv_i)
+INSN_LSX(vbitrevi_w, vv_i)
+INSN_LSX(vbitrevi_d, vv_i)
+
+INSN_LSX(vfrstp_b, vvv)
+INSN_LSX(vfrstp_h, vvv)
+INSN_LSX(vfrstpi_b, vv_i)
+INSN_LSX(vfrstpi_h, vv_i)
+
+INSN_LSX(vfadd_s, vvv)
+INSN_LSX(vfadd_d, vvv)
+INSN_LSX(vfsub_s, vvv)
+INSN_LSX(vfsub_d, vvv)
+INSN_LSX(vfmul_s, vvv)
+INSN_LSX(vfmul_d, vvv)
+INSN_LSX(vfdiv_s, vvv)
+INSN_LSX(vfdiv_d, vvv)
+
+INSN_LSX(vfmadd_s, vvvv)
+INSN_LSX(vfmadd_d, vvvv)
+INSN_LSX(vfmsub_s, vvvv)
+INSN_LSX(vfmsub_d, vvvv)
+INSN_LSX(vfnmadd_s, vvvv)
+INSN_LSX(vfnmadd_d, vvvv)
+INSN_LSX(vfnmsub_s, vvvv)
+INSN_LSX(vfnmsub_d, vvvv)
+
+INSN_LSX(vfmax_s, vvv)
+INSN_LSX(vfmax_d, vvv)
+INSN_LSX(vfmin_s, vvv)
+INSN_LSX(vfmin_d, vvv)
+
+INSN_LSX(vfmaxa_s, vvv)
+INSN_LSX(vfmaxa_d, vvv)
+INSN_LSX(vfmina_s, vvv)
+INSN_LSX(vfmina_d, vvv)
+
+INSN_LSX(vflogb_s, vv)
+INSN_LSX(vflogb_d, vv)
+
+INSN_LSX(vfclass_s, vv)
+INSN_LSX(vfclass_d, vv)
+
+INSN_LSX(vfsqrt_s, vv)
+INSN_LSX(vfsqrt_d, vv)
+INSN_LSX(vfrecip_s, vv)
+INSN_LSX(vfrecip_d, vv)
+INSN_LSX(vfrsqrt_s, vv)
+INSN_LSX(vfrsqrt_d, vv)
+
+INSN_LSX(vfcvtl_s_h, vv)
+INSN_LSX(vfcvth_s_h, vv)
+INSN_LSX(vfcvtl_d_s, vv)
+INSN_LSX(vfcvth_d_s, vv)
+INSN_LSX(vfcvt_h_s, vvv)
+INSN_LSX(vfcvt_s_d, vvv)
+
+INSN_LSX(vfrint_s, vv)
+INSN_LSX(vfrint_d, vv)
+INSN_LSX(vfrintrm_s, vv)
+INSN_LSX(vfrintrm_d, vv)
+INSN_LSX(vfrintrp_s, vv)
+INSN_LSX(vfrintrp_d, vv)
+INSN_LSX(vfrintrz_s, vv)
+INSN_LSX(vfrintrz_d, vv)
+INSN_LSX(vfrintrne_s, vv)
+INSN_LSX(vfrintrne_d, vv)
+
+INSN_LSX(vftint_w_s, vv)
+INSN_LSX(vftint_l_d, vv)
+INSN_LSX(vftintrm_w_s, vv)
+INSN_LSX(vftintrm_l_d, vv)
+INSN_LSX(vftintrp_w_s, vv)
+INSN_LSX(vftintrp_l_d, vv)
+INSN_LSX(vftintrz_w_s, vv)
+INSN_LSX(vftintrz_l_d, vv)
+INSN_LSX(vftintrne_w_s, vv)
+INSN_LSX(vftintrne_l_d, vv)
+INSN_LSX(vftint_wu_s, vv)
+INSN_LSX(vftint_lu_d, vv)
+INSN_LSX(vftintrz_wu_s, vv)
+INSN_LSX(vftintrz_lu_d, vv)
+INSN_LSX(vftint_w_d, vvv)
+INSN_LSX(vftintrm_w_d, vvv)
+INSN_LSX(vftintrp_w_d, vvv)
+INSN_LSX(vftintrz_w_d, vvv)
+INSN_LSX(vftintrne_w_d, vvv)
+INSN_LSX(vftintl_l_s, vv)
+INSN_LSX(vftinth_l_s, vv)
+INSN_LSX(vftintrml_l_s, vv)
+INSN_LSX(vftintrmh_l_s, vv)
+INSN_LSX(vftintrpl_l_s, vv)
+INSN_LSX(vftintrph_l_s, vv)
+INSN_LSX(vftintrzl_l_s, vv)
+INSN_LSX(vftintrzh_l_s, vv)
+INSN_LSX(vftintrnel_l_s, vv)
+INSN_LSX(vftintrneh_l_s, vv)
+
+INSN_LSX(vffint_s_w, vv)
+INSN_LSX(vffint_s_wu, vv)
+INSN_LSX(vffint_d_l, vv)
+INSN_LSX(vffint_d_lu, vv)
+INSN_LSX(vffintl_d_w, vv)
+INSN_LSX(vffinth_d_w, vv)
+INSN_LSX(vffint_s_l, vvv)
+
+INSN_LSX(vseq_b, vvv)
+INSN_LSX(vseq_h, vvv)
+INSN_LSX(vseq_w, vvv)
+INSN_LSX(vseq_d, vvv)
+INSN_LSX(vseqi_b, vv_i)
+INSN_LSX(vseqi_h, vv_i)
+INSN_LSX(vseqi_w, vv_i)
+INSN_LSX(vseqi_d, vv_i)
+
+INSN_LSX(vsle_b, vvv)
+INSN_LSX(vsle_h, vvv)
+INSN_LSX(vsle_w, vvv)
+INSN_LSX(vsle_d, vvv)
+INSN_LSX(vslei_b, vv_i)
+INSN_LSX(vslei_h, vv_i)
+INSN_LSX(vslei_w, vv_i)
+INSN_LSX(vslei_d, vv_i)
+INSN_LSX(vsle_bu, vvv)
+INSN_LSX(vsle_hu, vvv)
+INSN_LSX(vsle_wu, vvv)
+INSN_LSX(vsle_du, vvv)
+INSN_LSX(vslei_bu, vv_i)
+INSN_LSX(vslei_hu, vv_i)
+INSN_LSX(vslei_wu, vv_i)
+INSN_LSX(vslei_du, vv_i)
+
+INSN_LSX(vslt_b, vvv)
+INSN_LSX(vslt_h, vvv)
+INSN_LSX(vslt_w, vvv)
+INSN_LSX(vslt_d, vvv)
+INSN_LSX(vslti_b, vv_i)
+INSN_LSX(vslti_h, vv_i)
+INSN_LSX(vslti_w, vv_i)
+INSN_LSX(vslti_d, vv_i)
+INSN_LSX(vslt_bu, vvv)
+INSN_LSX(vslt_hu, vvv)
+INSN_LSX(vslt_wu, vvv)
+INSN_LSX(vslt_du, vvv)
+INSN_LSX(vslti_bu, vv_i)
+INSN_LSX(vslti_hu, vv_i)
+INSN_LSX(vslti_wu, vv_i)
+INSN_LSX(vslti_du, vv_i)
+
+#define output_vfcmp(C, PREFIX, SUFFIX) \
+{ \
+ (C)->info->fprintf_func((C)->info->stream, "%08x %s%s\tv%d, v%d, v%d", \
+ (C)->insn, PREFIX, SUFFIX, a->vd, \
+ a->vj, a->vk); \
+}
+
+static bool output_vvv_fcond(DisasContext *ctx, arg_vvv_fcond * a,
+ const char *suffix)
+{
+ bool ret = true;
+ switch (a->fcond) {
+ case 0x0:
+ output_vfcmp(ctx, "vfcmp_caf_", suffix);
+ break;
+ case 0x1:
+ output_vfcmp(ctx, "vfcmp_saf_", suffix);
+ break;
+ case 0x2:
+ output_vfcmp(ctx, "vfcmp_clt_", suffix);
+ break;
+ case 0x3:
+ output_vfcmp(ctx, "vfcmp_slt_", suffix);
+ break;
+ case 0x4:
+ output_vfcmp(ctx, "vfcmp_ceq_", suffix);
+ break;
+ case 0x5:
+ output_vfcmp(ctx, "vfcmp_seq_", suffix);
+ break;
+ case 0x6:
+ output_vfcmp(ctx, "vfcmp_cle_", suffix);
+ break;
+ case 0x7:
+ output_vfcmp(ctx, "vfcmp_sle_", suffix);
+ break;
+ case 0x8:
+ output_vfcmp(ctx, "vfcmp_cun_", suffix);
+ break;
+ case 0x9:
+ output_vfcmp(ctx, "vfcmp_sun_", suffix);
+ break;
+ case 0xA:
+ output_vfcmp(ctx, "vfcmp_cult_", suffix);
+ break;
+ case 0xB:
+ output_vfcmp(ctx, "vfcmp_sult_", suffix);
+ break;
+ case 0xC:
+ output_vfcmp(ctx, "vfcmp_cueq_", suffix);
+ break;
+ case 0xD:
+ output_vfcmp(ctx, "vfcmp_sueq_", suffix);
+ break;
+ case 0xE:
+ output_vfcmp(ctx, "vfcmp_cule_", suffix);
+ break;
+ case 0xF:
+ output_vfcmp(ctx, "vfcmp_sule_", suffix);
+ break;
+ case 0x10:
+ output_vfcmp(ctx, "vfcmp_cne_", suffix);
+ break;
+ case 0x11:
+ output_vfcmp(ctx, "vfcmp_sne_", suffix);
+ break;
+ case 0x14:
+ output_vfcmp(ctx, "vfcmp_cor_", suffix);
+ break;
+ case 0x15:
+ output_vfcmp(ctx, "vfcmp_sor_", suffix);
+ break;
+ case 0x18:
+ output_vfcmp(ctx, "vfcmp_cune_", suffix);
+ break;
+ case 0x19:
+ output_vfcmp(ctx, "vfcmp_sune_", suffix);
+ break;
+ default:
+ ret = false;
+ }
+ return ret;
+}
+
+#define LSX_FCMP_INSN(suffix) \
+static bool trans_vfcmp_cond_##suffix(DisasContext *ctx, \
+ arg_vvv_fcond * a) \
+{ \
+ return output_vvv_fcond(ctx, a, #suffix); \
+}
+
+LSX_FCMP_INSN(s)
+LSX_FCMP_INSN(d)
+
+INSN_LSX(vbitsel_v, vvvv)
+INSN_LSX(vbitseli_b, vv_i)
+
+INSN_LSX(vseteqz_v, cv)
+INSN_LSX(vsetnez_v, cv)
+INSN_LSX(vsetanyeqz_b, cv)
+INSN_LSX(vsetanyeqz_h, cv)
+INSN_LSX(vsetanyeqz_w, cv)
+INSN_LSX(vsetanyeqz_d, cv)
+INSN_LSX(vsetallnez_b, cv)
+INSN_LSX(vsetallnez_h, cv)
+INSN_LSX(vsetallnez_w, cv)
+INSN_LSX(vsetallnez_d, cv)
+
+INSN_LSX(vinsgr2vr_b, vr_i)
+INSN_LSX(vinsgr2vr_h, vr_i)
+INSN_LSX(vinsgr2vr_w, vr_i)
+INSN_LSX(vinsgr2vr_d, vr_i)
+INSN_LSX(vpickve2gr_b, rv_i)
+INSN_LSX(vpickve2gr_h, rv_i)
+INSN_LSX(vpickve2gr_w, rv_i)
+INSN_LSX(vpickve2gr_d, rv_i)
+INSN_LSX(vpickve2gr_bu, rv_i)
+INSN_LSX(vpickve2gr_hu, rv_i)
+INSN_LSX(vpickve2gr_wu, rv_i)
+INSN_LSX(vpickve2gr_du, rv_i)
+
+INSN_LSX(vreplgr2vr_b, vr)
+INSN_LSX(vreplgr2vr_h, vr)
+INSN_LSX(vreplgr2vr_w, vr)
+INSN_LSX(vreplgr2vr_d, vr)
+
+INSN_LSX(vreplve_b, vvr)
+INSN_LSX(vreplve_h, vvr)
+INSN_LSX(vreplve_w, vvr)
+INSN_LSX(vreplve_d, vvr)
+INSN_LSX(vreplvei_b, vv_i)
+INSN_LSX(vreplvei_h, vv_i)
+INSN_LSX(vreplvei_w, vv_i)
+INSN_LSX(vreplvei_d, vv_i)
+
+INSN_LSX(vbsll_v, vv_i)
+INSN_LSX(vbsrl_v, vv_i)
+
+INSN_LSX(vpackev_b, vvv)
+INSN_LSX(vpackev_h, vvv)
+INSN_LSX(vpackev_w, vvv)
+INSN_LSX(vpackev_d, vvv)
+INSN_LSX(vpackod_b, vvv)
+INSN_LSX(vpackod_h, vvv)
+INSN_LSX(vpackod_w, vvv)
+INSN_LSX(vpackod_d, vvv)
+
+INSN_LSX(vpickev_b, vvv)
+INSN_LSX(vpickev_h, vvv)
+INSN_LSX(vpickev_w, vvv)
+INSN_LSX(vpickev_d, vvv)
+INSN_LSX(vpickod_b, vvv)
+INSN_LSX(vpickod_h, vvv)
+INSN_LSX(vpickod_w, vvv)
+INSN_LSX(vpickod_d, vvv)
+
+INSN_LSX(vilvl_b, vvv)
+INSN_LSX(vilvl_h, vvv)
+INSN_LSX(vilvl_w, vvv)
+INSN_LSX(vilvl_d, vvv)
+INSN_LSX(vilvh_b, vvv)
+INSN_LSX(vilvh_h, vvv)
+INSN_LSX(vilvh_w, vvv)
+INSN_LSX(vilvh_d, vvv)
+
+INSN_LSX(vshuf_b, vvvv)
+INSN_LSX(vshuf_h, vvv)
+INSN_LSX(vshuf_w, vvv)
+INSN_LSX(vshuf_d, vvv)
+INSN_LSX(vshuf4i_b, vv_i)
+INSN_LSX(vshuf4i_h, vv_i)
+INSN_LSX(vshuf4i_w, vv_i)
+INSN_LSX(vshuf4i_d, vv_i)
+
+INSN_LSX(vpermi_w, vv_i)
+
+INSN_LSX(vextrins_d, vv_i)
+INSN_LSX(vextrins_w, vv_i)
+INSN_LSX(vextrins_h, vv_i)
+INSN_LSX(vextrins_b, vv_i)
+
+INSN_LSX(vld, vr_i)
+INSN_LSX(vst, vr_i)
+INSN_LSX(vldx, vrr)
+INSN_LSX(vstx, vrr)
+
+INSN_LSX(vldrepl_d, vr_i)
+INSN_LSX(vldrepl_w, vr_i)
+INSN_LSX(vldrepl_h, vr_i)
+INSN_LSX(vldrepl_b, vr_i)
+INSN_LSX(vstelm_d, vr_ii)
+INSN_LSX(vstelm_w, vr_ii)
+INSN_LSX(vstelm_h, vr_ii)
+INSN_LSX(vstelm_b, vr_ii)
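
For reference, each INSN_LSX(name, type) line expands through the macro defined at the top of this hunk into a small disassembler stub. For example, INSN_LSX(vadd_b, vvv) produces roughly:

static bool trans_vadd_b(DisasContext *ctx, arg_vvv *a)
{
    output_vvv(ctx, a, "vadd_b");
    return true;
}

so each entry above only selects which output_* formatter is used and which mnemonic string is printed.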
diff --git a/target/loongarch/fpu_helper.c b/target/loongarch/fpu_helper.c
index 4b96372..f6753c5 100644
--- a/target/loongarch/fpu_helper.c
+++ b/target/loongarch/fpu_helper.c
@@ -33,7 +33,7 @@ void restore_fp_status(CPULoongArchState *env)
set_flush_to_zero(0, &env->fp_status);
}
-static int ieee_ex_to_loongarch(int xcpt)
+int ieee_ex_to_loongarch(int xcpt)
{
int ret = 0;
if (xcpt & float_flag_invalid) {
diff --git a/target/loongarch/gdbstub.c b/target/loongarch/gdbstub.c
index fa3e034..0752fff 100644
--- a/target/loongarch/gdbstub.c
+++ b/target/loongarch/gdbstub.c
@@ -69,7 +69,7 @@ static int loongarch_gdb_get_fpu(CPULoongArchState *env,
GByteArray *mem_buf, int n)
{
if (0 <= n && n < 32) {
- return gdb_get_reg64(mem_buf, env->fpr[n]);
+ return gdb_get_reg64(mem_buf, env->fpr[n].vreg.D(0));
} else if (n == 32) {
uint64_t val = read_fcc(env);
return gdb_get_reg64(mem_buf, val);
@@ -85,7 +85,7 @@ static int loongarch_gdb_set_fpu(CPULoongArchState *env,
int length = 0;
if (0 <= n && n < 32) {
- env->fpr[n] = ldq_p(mem_buf);
+ env->fpr[n].vreg.D(0) = ldq_p(mem_buf);
length = 8;
} else if (n == 32) {
uint64_t val = ldq_p(mem_buf);
diff --git a/target/loongarch/helper.h b/target/loongarch/helper.h
index 9c01823..b9de77d 100644
--- a/target/loongarch/helper.h
+++ b/target/loongarch/helper.h
@@ -130,3 +130,569 @@ DEF_HELPER_4(ldpte, void, env, tl, tl, i32)
DEF_HELPER_1(ertn, void, env)
DEF_HELPER_1(idle, void, env)
#endif
+
+/* LoongArch LSX */
+DEF_HELPER_4(vhaddw_h_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_w_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_d_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_q_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_hu_bu, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_wu_hu, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_du_wu, void, env, i32, i32, i32)
+DEF_HELPER_4(vhaddw_qu_du, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_h_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_w_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_d_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_q_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_hu_bu, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_wu_hu, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_du_wu, void, env, i32, i32, i32)
+DEF_HELPER_4(vhsubw_qu_du, void, env, i32, i32, i32)
+
+DEF_HELPER_FLAGS_4(vaddwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vsubwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwev_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_q_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vaddwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vsubwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwev_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsubwod_q_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vaddwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwev_q_du_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vaddwod_q_du_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vavg_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavg_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vavgr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vavgr_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vabsd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vabsd_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vadda_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vadda_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vadda_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vadda_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmini_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmini_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vmaxi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vmaxi_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vmuh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmuh_du, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmulwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmulwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmulwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmulwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmadd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmadd_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmsub_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmsub_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmaddwev_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwev_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwev_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_h_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_w_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_d_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmaddwev_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwev_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwev_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_h_bu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_w_hu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_d_wu, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(vmaddwev_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwev_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwev_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_h_bu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_w_hu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vmaddwod_d_wu_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_4(vdiv_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vdiv_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vdiv_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vdiv_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vdiv_bu, void, env, i32, i32, i32)
+DEF_HELPER_4(vdiv_hu, void, env, i32, i32, i32)
+DEF_HELPER_4(vdiv_wu, void, env, i32, i32, i32)
+DEF_HELPER_4(vdiv_du, void, env, i32, i32, i32)
+DEF_HELPER_4(vmod_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vmod_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vmod_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vmod_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vmod_bu, void, env, i32, i32, i32)
+DEF_HELPER_4(vmod_hu, void, env, i32, i32, i32)
+DEF_HELPER_4(vmod_wu, void, env, i32, i32, i32)
+DEF_HELPER_4(vmod_du, void, env, i32, i32, i32)
+
+DEF_HELPER_FLAGS_4(vsat_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vsat_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_3(vexth_h_b, void, env, i32, i32)
+DEF_HELPER_3(vexth_w_h, void, env, i32, i32)
+DEF_HELPER_3(vexth_d_w, void, env, i32, i32)
+DEF_HELPER_3(vexth_q_d, void, env, i32, i32)
+DEF_HELPER_3(vexth_hu_bu, void, env, i32, i32)
+DEF_HELPER_3(vexth_wu_hu, void, env, i32, i32)
+DEF_HELPER_3(vexth_du_wu, void, env, i32, i32)
+DEF_HELPER_3(vexth_qu_du, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_4(vsigncov_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsigncov_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsigncov_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vsigncov_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_3(vmskltz_b, void, env, i32, i32)
+DEF_HELPER_3(vmskltz_h, void, env, i32, i32)
+DEF_HELPER_3(vmskltz_w, void, env, i32, i32)
+DEF_HELPER_3(vmskltz_d, void, env, i32, i32)
+DEF_HELPER_3(vmskgez_b, void, env, i32, i32)
+DEF_HELPER_3(vmsknz_b, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_4(vnori_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_4(vsllwil_h_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vsllwil_w_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsllwil_d_w, void, env, i32, i32, i32)
+DEF_HELPER_3(vextl_q_d, void, env, i32, i32)
+DEF_HELPER_4(vsllwil_hu_bu, void, env, i32, i32, i32)
+DEF_HELPER_4(vsllwil_wu_hu, void, env, i32, i32, i32)
+DEF_HELPER_4(vsllwil_du_wu, void, env, i32, i32, i32)
+DEF_HELPER_3(vextl_qu_du, void, env, i32, i32)
+
+DEF_HELPER_4(vsrlr_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlr_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlr_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlr_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlri_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlri_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlri_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlri_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vsrar_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrar_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrar_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrar_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrari_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrari_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrari_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrari_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vsrln_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrln_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrln_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vsran_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsran_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsran_w_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vsrlni_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlni_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlni_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlni_d_q, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrani_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrani_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrani_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrani_d_q, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vsrlrn_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlrn_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlrn_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrarn_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrarn_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrarn_w_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vsrlrni_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlrni_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlrni_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrlrni_d_q, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrarni_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrarni_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrarni_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vsrarni_d_q, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vssrln_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrln_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrln_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssran_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssran_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssran_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrln_bu_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrln_hu_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrln_wu_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssran_bu_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssran_hu_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssran_wu_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vssrlni_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlni_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlni_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlni_d_q, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrani_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrani_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrani_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrani_d_q, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlni_bu_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlni_hu_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlni_wu_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlni_du_q, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrani_bu_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrani_hu_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrani_wu_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrani_du_q, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vssrlrn_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlrn_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlrn_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarn_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarn_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarn_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlrn_bu_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlrn_hu_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlrn_wu_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarn_bu_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarn_hu_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarn_wu_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vssrlrni_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlrni_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlrni_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlrni_d_q, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarni_b_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarni_h_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarni_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarni_d_q, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlrni_bu_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlrni_hu_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlrni_wu_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrlrni_du_q, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarni_bu_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarni_hu_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarni_wu_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vssrarni_du_q, void, env, i32, i32, i32)
+
+DEF_HELPER_3(vclo_b, void, env, i32, i32)
+DEF_HELPER_3(vclo_h, void, env, i32, i32)
+DEF_HELPER_3(vclo_w, void, env, i32, i32)
+DEF_HELPER_3(vclo_d, void, env, i32, i32)
+DEF_HELPER_3(vclz_b, void, env, i32, i32)
+DEF_HELPER_3(vclz_h, void, env, i32, i32)
+DEF_HELPER_3(vclz_w, void, env, i32, i32)
+DEF_HELPER_3(vclz_d, void, env, i32, i32)
+
+DEF_HELPER_3(vpcnt_b, void, env, i32, i32)
+DEF_HELPER_3(vpcnt_h, void, env, i32, i32)
+DEF_HELPER_3(vpcnt_w, void, env, i32, i32)
+DEF_HELPER_3(vpcnt_d, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_4(vbitclr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitclr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitclr_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitclr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitclri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitclri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitclri_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitclri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vbitset_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitset_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitset_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitset_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitseti_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitseti_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitseti_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitseti_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vbitrev_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitrev_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitrev_w, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitrev_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(vbitrevi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitrevi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitrevi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vbitrevi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_4(vfrstp_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vfrstp_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vfrstpi_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vfrstpi_h, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vfadd_s, void, env, i32, i32, i32)
+DEF_HELPER_4(vfadd_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vfsub_s, void, env, i32, i32, i32)
+DEF_HELPER_4(vfsub_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vfmul_s, void, env, i32, i32, i32)
+DEF_HELPER_4(vfmul_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vfdiv_s, void, env, i32, i32, i32)
+DEF_HELPER_4(vfdiv_d, void, env, i32, i32, i32)
+
+DEF_HELPER_5(vfmadd_s, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vfmadd_d, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vfmsub_s, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vfmsub_d, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vfnmadd_s, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vfnmadd_d, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vfnmsub_s, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vfnmsub_d, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_4(vfmax_s, void, env, i32, i32, i32)
+DEF_HELPER_4(vfmax_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vfmin_s, void, env, i32, i32, i32)
+DEF_HELPER_4(vfmin_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vfmaxa_s, void, env, i32, i32, i32)
+DEF_HELPER_4(vfmaxa_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vfmina_s, void, env, i32, i32, i32)
+DEF_HELPER_4(vfmina_d, void, env, i32, i32, i32)
+
+DEF_HELPER_3(vflogb_s, void, env, i32, i32)
+DEF_HELPER_3(vflogb_d, void, env, i32, i32)
+
+DEF_HELPER_3(vfclass_s, void, env, i32, i32)
+DEF_HELPER_3(vfclass_d, void, env, i32, i32)
+
+DEF_HELPER_3(vfsqrt_s, void, env, i32, i32)
+DEF_HELPER_3(vfsqrt_d, void, env, i32, i32)
+DEF_HELPER_3(vfrecip_s, void, env, i32, i32)
+DEF_HELPER_3(vfrecip_d, void, env, i32, i32)
+DEF_HELPER_3(vfrsqrt_s, void, env, i32, i32)
+DEF_HELPER_3(vfrsqrt_d, void, env, i32, i32)
+
+DEF_HELPER_3(vfcvtl_s_h, void, env, i32, i32)
+DEF_HELPER_3(vfcvth_s_h, void, env, i32, i32)
+DEF_HELPER_3(vfcvtl_d_s, void, env, i32, i32)
+DEF_HELPER_3(vfcvth_d_s, void, env, i32, i32)
+DEF_HELPER_4(vfcvt_h_s, void, env, i32, i32, i32)
+DEF_HELPER_4(vfcvt_s_d, void, env, i32, i32, i32)
+
+DEF_HELPER_3(vfrintrne_s, void, env, i32, i32)
+DEF_HELPER_3(vfrintrne_d, void, env, i32, i32)
+DEF_HELPER_3(vfrintrz_s, void, env, i32, i32)
+DEF_HELPER_3(vfrintrz_d, void, env, i32, i32)
+DEF_HELPER_3(vfrintrp_s, void, env, i32, i32)
+DEF_HELPER_3(vfrintrp_d, void, env, i32, i32)
+DEF_HELPER_3(vfrintrm_s, void, env, i32, i32)
+DEF_HELPER_3(vfrintrm_d, void, env, i32, i32)
+DEF_HELPER_3(vfrint_s, void, env, i32, i32)
+DEF_HELPER_3(vfrint_d, void, env, i32, i32)
+
+DEF_HELPER_3(vftintrne_w_s, void, env, i32, i32)
+DEF_HELPER_3(vftintrne_l_d, void, env, i32, i32)
+DEF_HELPER_3(vftintrz_w_s, void, env, i32, i32)
+DEF_HELPER_3(vftintrz_l_d, void, env, i32, i32)
+DEF_HELPER_3(vftintrp_w_s, void, env, i32, i32)
+DEF_HELPER_3(vftintrp_l_d, void, env, i32, i32)
+DEF_HELPER_3(vftintrm_w_s, void, env, i32, i32)
+DEF_HELPER_3(vftintrm_l_d, void, env, i32, i32)
+DEF_HELPER_3(vftint_w_s, void, env, i32, i32)
+DEF_HELPER_3(vftint_l_d, void, env, i32, i32)
+DEF_HELPER_3(vftintrz_wu_s, void, env, i32, i32)
+DEF_HELPER_3(vftintrz_lu_d, void, env, i32, i32)
+DEF_HELPER_3(vftint_wu_s, void, env, i32, i32)
+DEF_HELPER_3(vftint_lu_d, void, env, i32, i32)
+DEF_HELPER_4(vftintrne_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vftintrz_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vftintrp_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vftintrm_w_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vftint_w_d, void, env, i32, i32, i32)
+DEF_HELPER_3(vftintrnel_l_s, void, env, i32, i32)
+DEF_HELPER_3(vftintrneh_l_s, void, env, i32, i32)
+DEF_HELPER_3(vftintrzl_l_s, void, env, i32, i32)
+DEF_HELPER_3(vftintrzh_l_s, void, env, i32, i32)
+DEF_HELPER_3(vftintrpl_l_s, void, env, i32, i32)
+DEF_HELPER_3(vftintrph_l_s, void, env, i32, i32)
+DEF_HELPER_3(vftintrml_l_s, void, env, i32, i32)
+DEF_HELPER_3(vftintrmh_l_s, void, env, i32, i32)
+DEF_HELPER_3(vftintl_l_s, void, env, i32, i32)
+DEF_HELPER_3(vftinth_l_s, void, env, i32, i32)
+
+DEF_HELPER_3(vffint_s_w, void, env, i32, i32)
+DEF_HELPER_3(vffint_d_l, void, env, i32, i32)
+DEF_HELPER_3(vffint_s_wu, void, env, i32, i32)
+DEF_HELPER_3(vffint_d_lu, void, env, i32, i32)
+DEF_HELPER_3(vffintl_d_w, void, env, i32, i32)
+DEF_HELPER_3(vffinth_d_w, void, env, i32, i32)
+DEF_HELPER_4(vffint_s_l, void, env, i32, i32, i32)
+
+DEF_HELPER_FLAGS_4(vseqi_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vseqi_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vseqi_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vseqi_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vslei_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslei_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(vslti_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_w, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_d, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_bu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_hu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_wu, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(vslti_du, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_5(vfcmp_c_s, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vfcmp_s_s, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vfcmp_c_d, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(vfcmp_s_d, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_FLAGS_4(vbitseli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_3(vsetanyeqz_b, void, env, i32, i32)
+DEF_HELPER_3(vsetanyeqz_h, void, env, i32, i32)
+DEF_HELPER_3(vsetanyeqz_w, void, env, i32, i32)
+DEF_HELPER_3(vsetanyeqz_d, void, env, i32, i32)
+DEF_HELPER_3(vsetallnez_b, void, env, i32, i32)
+DEF_HELPER_3(vsetallnez_h, void, env, i32, i32)
+DEF_HELPER_3(vsetallnez_w, void, env, i32, i32)
+DEF_HELPER_3(vsetallnez_d, void, env, i32, i32)
+
+DEF_HELPER_4(vpackev_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vpackev_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vpackev_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vpackev_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vpackod_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vpackod_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vpackod_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vpackod_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vpickev_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vpickev_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vpickev_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vpickev_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vpickod_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vpickod_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vpickod_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vpickod_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vilvl_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vilvl_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vilvl_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vilvl_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vilvh_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vilvh_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vilvh_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vilvh_d, void, env, i32, i32, i32)
+
+DEF_HELPER_5(vshuf_b, void, env, i32, i32, i32, i32)
+DEF_HELPER_4(vshuf_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vshuf_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vshuf_d, void, env, i32, i32, i32)
+DEF_HELPER_4(vshuf4i_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vshuf4i_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vshuf4i_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vshuf4i_d, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vpermi_w, void, env, i32, i32, i32)
+
+DEF_HELPER_4(vextrins_b, void, env, i32, i32, i32)
+DEF_HELPER_4(vextrins_h, void, env, i32, i32, i32)
+DEF_HELPER_4(vextrins_w, void, env, i32, i32, i32)
+DEF_HELPER_4(vextrins_d, void, env, i32, i32, i32)
diff --git a/target/loongarch/insn_trans/trans_farith.c.inc b/target/loongarch/insn_trans/trans_farith.c.inc
index 7081fbb..21ea473 100644
--- a/target/loongarch/insn_trans/trans_farith.c.inc
+++ b/target/loongarch/insn_trans/trans_farith.c.inc
@@ -17,18 +17,29 @@
static bool gen_fff(DisasContext *ctx, arg_fff *a,
void (*func)(TCGv, TCGv_env, TCGv, TCGv))
{
+ TCGv dest = get_fpr(ctx, a->fd);
+ TCGv src1 = get_fpr(ctx, a->fj);
+ TCGv src2 = get_fpr(ctx, a->fk);
+
CHECK_FPE;
- func(cpu_fpr[a->fd], cpu_env, cpu_fpr[a->fj], cpu_fpr[a->fk]);
+ func(dest, cpu_env, src1, src2);
+ set_fpr(a->fd, dest);
+
return true;
}
static bool gen_ff(DisasContext *ctx, arg_ff *a,
void (*func)(TCGv, TCGv_env, TCGv))
{
+ TCGv dest = get_fpr(ctx, a->fd);
+ TCGv src = get_fpr(ctx, a->fj);
+
CHECK_FPE;
- func(cpu_fpr[a->fd], cpu_env, cpu_fpr[a->fj]);
+ func(dest, cpu_env, src);
+ set_fpr(a->fd, dest);
+
return true;
}
@@ -37,61 +48,98 @@ static bool gen_muladd(DisasContext *ctx, arg_ffff *a,
int flag)
{
TCGv_i32 tflag = tcg_constant_i32(flag);
+ TCGv dest = get_fpr(ctx, a->fd);
+ TCGv src1 = get_fpr(ctx, a->fj);
+ TCGv src2 = get_fpr(ctx, a->fk);
+ TCGv src3 = get_fpr(ctx, a->fa);
CHECK_FPE;
- func(cpu_fpr[a->fd], cpu_env, cpu_fpr[a->fj],
- cpu_fpr[a->fk], cpu_fpr[a->fa], tflag);
+ func(dest, cpu_env, src1, src2, src3, tflag);
+ set_fpr(a->fd, dest);
+
return true;
}
static bool trans_fcopysign_s(DisasContext *ctx, arg_fcopysign_s *a)
{
+ TCGv dest = get_fpr(ctx, a->fd);
+ TCGv src1 = get_fpr(ctx, a->fk);
+ TCGv src2 = get_fpr(ctx, a->fj);
+
CHECK_FPE;
- tcg_gen_deposit_i64(cpu_fpr[a->fd], cpu_fpr[a->fk], cpu_fpr[a->fj], 0, 31);
+ tcg_gen_deposit_i64(dest, src1, src2, 0, 31);
+ set_fpr(a->fd, dest);
+
return true;
}
static bool trans_fcopysign_d(DisasContext *ctx, arg_fcopysign_d *a)
{
+ TCGv dest = get_fpr(ctx, a->fd);
+ TCGv src1 = get_fpr(ctx, a->fk);
+ TCGv src2 = get_fpr(ctx, a->fj);
+
CHECK_FPE;
- tcg_gen_deposit_i64(cpu_fpr[a->fd], cpu_fpr[a->fk], cpu_fpr[a->fj], 0, 63);
+ tcg_gen_deposit_i64(dest, src1, src2, 0, 63);
+ set_fpr(a->fd, dest);
+
return true;
}
static bool trans_fabs_s(DisasContext *ctx, arg_fabs_s *a)
{
+ TCGv dest = get_fpr(ctx, a->fd);
+ TCGv src = get_fpr(ctx, a->fj);
+
CHECK_FPE;
- tcg_gen_andi_i64(cpu_fpr[a->fd], cpu_fpr[a->fj], MAKE_64BIT_MASK(0, 31));
- gen_nanbox_s(cpu_fpr[a->fd], cpu_fpr[a->fd]);
+ tcg_gen_andi_i64(dest, src, MAKE_64BIT_MASK(0, 31));
+ gen_nanbox_s(dest, dest);
+ set_fpr(a->fd, dest);
+
return true;
}
static bool trans_fabs_d(DisasContext *ctx, arg_fabs_d *a)
{
+ TCGv dest = get_fpr(ctx, a->fd);
+ TCGv src = get_fpr(ctx, a->fj);
+
CHECK_FPE;
- tcg_gen_andi_i64(cpu_fpr[a->fd], cpu_fpr[a->fj], MAKE_64BIT_MASK(0, 63));
+ tcg_gen_andi_i64(dest, src, MAKE_64BIT_MASK(0, 63));
+ set_fpr(a->fd, dest);
+
return true;
}
static bool trans_fneg_s(DisasContext *ctx, arg_fneg_s *a)
{
+ TCGv dest = get_fpr(ctx, a->fd);
+ TCGv src = get_fpr(ctx, a->fj);
+
CHECK_FPE;
- tcg_gen_xori_i64(cpu_fpr[a->fd], cpu_fpr[a->fj], 0x80000000);
- gen_nanbox_s(cpu_fpr[a->fd], cpu_fpr[a->fd]);
+ tcg_gen_xori_i64(dest, src, 0x80000000);
+ gen_nanbox_s(dest, dest);
+ set_fpr(a->fd, dest);
+
return true;
}
static bool trans_fneg_d(DisasContext *ctx, arg_fneg_d *a)
{
+ TCGv dest = get_fpr(ctx, a->fd);
+ TCGv src = get_fpr(ctx, a->fj);
+
CHECK_FPE;
- tcg_gen_xori_i64(cpu_fpr[a->fd], cpu_fpr[a->fj], 0x8000000000000000LL);
+ tcg_gen_xori_i64(dest, src, 0x8000000000000000LL);
+ set_fpr(a->fd, dest);
+
return true;
}
diff --git a/target/loongarch/insn_trans/trans_fcmp.c.inc b/target/loongarch/insn_trans/trans_fcmp.c.inc
index 3b0da2b..a78868d 100644
--- a/target/loongarch/insn_trans/trans_fcmp.c.inc
+++ b/target/loongarch/insn_trans/trans_fcmp.c.inc
@@ -25,17 +25,19 @@ static uint32_t get_fcmp_flags(int cond)
static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a)
{
- TCGv var;
+ TCGv var, src1, src2;
uint32_t flags;
void (*fn)(TCGv, TCGv_env, TCGv, TCGv, TCGv_i32);
CHECK_FPE;
var = tcg_temp_new();
+ src1 = get_fpr(ctx, a->fj);
+ src2 = get_fpr(ctx, a->fk);
fn = (a->fcond & 1 ? gen_helper_fcmp_s_s : gen_helper_fcmp_c_s);
flags = get_fcmp_flags(a->fcond >> 1);
- fn(var, cpu_env, cpu_fpr[a->fj], cpu_fpr[a->fk], tcg_constant_i32(flags));
+ fn(var, cpu_env, src1, src2, tcg_constant_i32(flags));
tcg_gen_st8_tl(var, cpu_env, offsetof(CPULoongArchState, cf[a->cd]));
return true;
@@ -43,17 +45,19 @@ static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a)
static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a)
{
- TCGv var;
+ TCGv var, src1, src2;
uint32_t flags;
void (*fn)(TCGv, TCGv_env, TCGv, TCGv, TCGv_i32);
CHECK_FPE;
var = tcg_temp_new();
+ src1 = get_fpr(ctx, a->fj);
+ src2 = get_fpr(ctx, a->fk);
fn = (a->fcond & 1 ? gen_helper_fcmp_s_d : gen_helper_fcmp_c_d);
flags = get_fcmp_flags(a->fcond >> 1);
- fn(var, cpu_env, cpu_fpr[a->fj], cpu_fpr[a->fk], tcg_constant_i32(flags));
+ fn(var, cpu_env, src1, src2, tcg_constant_i32(flags));
tcg_gen_st8_tl(var, cpu_env, offsetof(CPULoongArchState, cf[a->cd]));
return true;
diff --git a/target/loongarch/insn_trans/trans_fmemory.c.inc b/target/loongarch/insn_trans/trans_fmemory.c.inc
index 0d11843..91c09fb 100644
--- a/target/loongarch/insn_trans/trans_fmemory.c.inc
+++ b/target/loongarch/insn_trans/trans_fmemory.c.inc
@@ -13,6 +13,7 @@ static void maybe_nanbox_load(TCGv freg, MemOp mop)
static bool gen_fload_i(DisasContext *ctx, arg_fr_i *a, MemOp mop)
{
TCGv addr = gpr_src(ctx, a->rj, EXT_NONE);
+ TCGv dest = get_fpr(ctx, a->fd);
CHECK_FPE;
@@ -22,8 +23,9 @@ static bool gen_fload_i(DisasContext *ctx, arg_fr_i *a, MemOp mop)
addr = temp;
}
- tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop);
- maybe_nanbox_load(cpu_fpr[a->fd], mop);
+ tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop);
+ maybe_nanbox_load(dest, mop);
+ set_fpr(a->fd, dest);
return true;
}
@@ -31,6 +33,7 @@ static bool gen_fload_i(DisasContext *ctx, arg_fr_i *a, MemOp mop)
static bool gen_fstore_i(DisasContext *ctx, arg_fr_i *a, MemOp mop)
{
TCGv addr = gpr_src(ctx, a->rj, EXT_NONE);
+ TCGv src = get_fpr(ctx, a->fd);
CHECK_FPE;
@@ -40,7 +43,8 @@ static bool gen_fstore_i(DisasContext *ctx, arg_fr_i *a, MemOp mop)
addr = temp;
}
- tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop);
+ tcg_gen_qemu_st_tl(src, addr, ctx->mem_idx, mop);
+
return true;
}
@@ -48,14 +52,16 @@ static bool gen_floadx(DisasContext *ctx, arg_frr *a, MemOp mop)
{
TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
+ TCGv dest = get_fpr(ctx, a->fd);
TCGv addr;
CHECK_FPE;
addr = tcg_temp_new();
tcg_gen_add_tl(addr, src1, src2);
- tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop);
- maybe_nanbox_load(cpu_fpr[a->fd], mop);
+ tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop);
+ maybe_nanbox_load(dest, mop);
+ set_fpr(a->fd, dest);
return true;
}
@@ -64,13 +70,14 @@ static bool gen_fstorex(DisasContext *ctx, arg_frr *a, MemOp mop)
{
TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
+ TCGv src3 = get_fpr(ctx, a->fd);
TCGv addr;
CHECK_FPE;
addr = tcg_temp_new();
tcg_gen_add_tl(addr, src1, src2);
- tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop);
+ tcg_gen_qemu_st_tl(src3, addr, ctx->mem_idx, mop);
return true;
}
@@ -79,6 +86,7 @@ static bool gen_fload_gt(DisasContext *ctx, arg_frr *a, MemOp mop)
{
TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
+ TCGv dest = get_fpr(ctx, a->fd);
TCGv addr;
CHECK_FPE;
@@ -86,8 +94,9 @@ static bool gen_fload_gt(DisasContext *ctx, arg_frr *a, MemOp mop)
addr = tcg_temp_new();
gen_helper_asrtgt_d(cpu_env, src1, src2);
tcg_gen_add_tl(addr, src1, src2);
- tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop);
- maybe_nanbox_load(cpu_fpr[a->fd], mop);
+ tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop);
+ maybe_nanbox_load(dest, mop);
+ set_fpr(a->fd, dest);
return true;
}
@@ -96,6 +105,7 @@ static bool gen_fstore_gt(DisasContext *ctx, arg_frr *a, MemOp mop)
{
TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
+ TCGv src3 = get_fpr(ctx, a->fd);
TCGv addr;
CHECK_FPE;
@@ -103,7 +113,7 @@ static bool gen_fstore_gt(DisasContext *ctx, arg_frr *a, MemOp mop)
addr = tcg_temp_new();
gen_helper_asrtgt_d(cpu_env, src1, src2);
tcg_gen_add_tl(addr, src1, src2);
- tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop);
+ tcg_gen_qemu_st_tl(src3, addr, ctx->mem_idx, mop);
return true;
}
@@ -112,6 +122,7 @@ static bool gen_fload_le(DisasContext *ctx, arg_frr *a, MemOp mop)
{
TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
+ TCGv dest = get_fpr(ctx, a->fd);
TCGv addr;
CHECK_FPE;
@@ -119,8 +130,9 @@ static bool gen_fload_le(DisasContext *ctx, arg_frr *a, MemOp mop)
addr = tcg_temp_new();
gen_helper_asrtle_d(cpu_env, src1, src2);
tcg_gen_add_tl(addr, src1, src2);
- tcg_gen_qemu_ld_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop);
- maybe_nanbox_load(cpu_fpr[a->fd], mop);
+ tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, mop);
+ maybe_nanbox_load(dest, mop);
+ set_fpr(a->fd, dest);
return true;
}
@@ -129,6 +141,7 @@ static bool gen_fstore_le(DisasContext *ctx, arg_frr *a, MemOp mop)
{
TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
+ TCGv src3 = get_fpr(ctx, a->fd);
TCGv addr;
CHECK_FPE;
@@ -136,7 +149,7 @@ static bool gen_fstore_le(DisasContext *ctx, arg_frr *a, MemOp mop)
addr = tcg_temp_new();
gen_helper_asrtle_d(cpu_env, src1, src2);
tcg_gen_add_tl(addr, src1, src2);
- tcg_gen_qemu_st_tl(cpu_fpr[a->fd], addr, ctx->mem_idx, mop);
+ tcg_gen_qemu_st_tl(src3, addr, ctx->mem_idx, mop);
return true;
}
diff --git a/target/loongarch/insn_trans/trans_fmov.c.inc b/target/loongarch/insn_trans/trans_fmov.c.inc
index 069c941..5af0dd1 100644
--- a/target/loongarch/insn_trans/trans_fmov.c.inc
+++ b/target/loongarch/insn_trans/trans_fmov.c.inc
@@ -10,14 +10,17 @@ static const uint32_t fcsr_mask[4] = {
static bool trans_fsel(DisasContext *ctx, arg_fsel *a)
{
TCGv zero = tcg_constant_tl(0);
+ TCGv dest = get_fpr(ctx, a->fd);
+ TCGv src1 = get_fpr(ctx, a->fj);
+ TCGv src2 = get_fpr(ctx, a->fk);
TCGv cond;
CHECK_FPE;
cond = tcg_temp_new();
tcg_gen_ld8u_tl(cond, cpu_env, offsetof(CPULoongArchState, cf[a->ca]));
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_fpr[a->fd], cond, zero,
- cpu_fpr[a->fj], cpu_fpr[a->fk]);
+ tcg_gen_movcond_tl(TCG_COND_EQ, dest, cond, zero, src1, src2);
+ set_fpr(a->fd, dest);
return true;
}
@@ -25,15 +28,16 @@ static bool trans_fsel(DisasContext *ctx, arg_fsel *a)
static bool gen_f2f(DisasContext *ctx, arg_ff *a,
void (*func)(TCGv, TCGv), bool nanbox)
{
- TCGv dest = cpu_fpr[a->fd];
- TCGv src = cpu_fpr[a->fj];
+ TCGv dest = get_fpr(ctx, a->fd);
+ TCGv src = get_fpr(ctx, a->fj);
CHECK_FPE;
func(dest, src);
if (nanbox) {
- gen_nanbox_s(cpu_fpr[a->fd], cpu_fpr[a->fd]);
+ gen_nanbox_s(dest, dest);
}
+ set_fpr(a->fd, dest);
return true;
}
@@ -42,10 +46,13 @@ static bool gen_r2f(DisasContext *ctx, arg_fr *a,
void (*func)(TCGv, TCGv))
{
TCGv src = gpr_src(ctx, a->rj, EXT_NONE);
+ TCGv dest = get_fpr(ctx, a->fd);
CHECK_FPE;
- func(cpu_fpr[a->fd], src);
+ func(dest, src);
+ set_fpr(a->fd, dest);
+
return true;
}
@@ -53,10 +60,11 @@ static bool gen_f2r(DisasContext *ctx, arg_rf *a,
void (*func)(TCGv, TCGv))
{
TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE);
+ TCGv src = get_fpr(ctx, a->fj);
CHECK_FPE;
- func(dest, cpu_fpr[a->fj]);
+ func(dest, src);
gen_set_gpr(a->rd, dest, EXT_NONE);
return true;
@@ -124,11 +132,12 @@ static void gen_movfrh2gr_s(TCGv dest, TCGv src)
static bool trans_movfr2cf(DisasContext *ctx, arg_movfr2cf *a)
{
TCGv t0;
+ TCGv src = get_fpr(ctx, a->fj);
CHECK_FPE;
t0 = tcg_temp_new();
- tcg_gen_andi_tl(t0, cpu_fpr[a->fj], 0x1);
+ tcg_gen_andi_tl(t0, src, 0x1);
tcg_gen_st8_tl(t0, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7]));
return true;
@@ -136,10 +145,14 @@ static bool trans_movfr2cf(DisasContext *ctx, arg_movfr2cf *a)
static bool trans_movcf2fr(DisasContext *ctx, arg_movcf2fr *a)
{
+ TCGv dest = get_fpr(ctx, a->fd);
+
CHECK_FPE;
- tcg_gen_ld8u_tl(cpu_fpr[a->fd], cpu_env,
+ tcg_gen_ld8u_tl(dest, cpu_env,
offsetof(CPULoongArchState, cf[a->cj & 0x7]));
+ set_fpr(a->fd, dest);
+
return true;
}
diff --git a/target/loongarch/insn_trans/trans_lsx.c.inc b/target/loongarch/insn_trans/trans_lsx.c.inc
new file mode 100644
index 0000000..0be2b5a
--- /dev/null
+++ b/target/loongarch/insn_trans/trans_lsx.c.inc
@@ -0,0 +1,4400 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LSX translate functions
+ * Copyright (c) 2022-2023 Loongson Technology Corporation Limited
+ */
+
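+/*
+ * CHECK_SXE mirrors CHECK_FPE: if the HW_FLAGS_EUEN_SXE bit is clear in
+ * the TB flags, the 128-bit vector unit is disabled and the instruction
+ * must raise the SXD exception instead of being translated.  For
+ * user-only builds the macro expands to nothing.
+ */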
+#ifndef CONFIG_USER_ONLY
+#define CHECK_SXE do { \
+ if ((ctx->base.tb->flags & HW_FLAGS_EUEN_SXE) == 0) { \
+ generate_exception(ctx, EXCCODE_SXD); \
+ return true; \
+ } \
+} while (0)
+#else
+#define CHECK_SXE
+#endif
+
+static bool gen_vvvv(DisasContext *ctx, arg_vvvv *a,
+ void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32,
+ TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 vk = tcg_constant_i32(a->vk);
+ TCGv_i32 va = tcg_constant_i32(a->va);
+
+ CHECK_SXE;
+ func(cpu_env, vd, vj, vk, va);
+ return true;
+}
+
+static bool gen_vvv(DisasContext *ctx, arg_vvv *a,
+ void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 vk = tcg_constant_i32(a->vk);
+
+ CHECK_SXE;
+
+ func(cpu_env, vd, vj, vk);
+ return true;
+}
+
+static bool gen_vv(DisasContext *ctx, arg_vv *a,
+ void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+
+ CHECK_SXE;
+ func(cpu_env, vd, vj);
+ return true;
+}
+
+static bool gen_vv_i(DisasContext *ctx, arg_vv_i *a,
+ void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 imm = tcg_constant_i32(a->imm);
+
+ CHECK_SXE;
+ func(cpu_env, vd, vj, imm);
+ return true;
+}
+
+static bool gen_cv(DisasContext *ctx, arg_cv *a,
+ void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 cd = tcg_constant_i32(a->cd);
+
+ CHECK_SXE;
+ func(cpu_env, cd, vj);
+ return true;
+}
+
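+/*
+ * Expanders for the gvec-based instructions.  Register operands are turned
+ * into offsets with vec_full_offset(); oprsz is fixed at 16 bytes (one
+ * 128-bit LSX register) and maxsz comes from ctx->vl / 8.
+ */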
+static bool gvec_vvv(DisasContext *ctx, arg_vvv *a, MemOp mop,
+ void (*func)(unsigned, uint32_t, uint32_t,
+ uint32_t, uint32_t, uint32_t))
+{
+ uint32_t vd_ofs, vj_ofs, vk_ofs;
+
+ CHECK_SXE;
+
+ vd_ofs = vec_full_offset(a->vd);
+ vj_ofs = vec_full_offset(a->vj);
+ vk_ofs = vec_full_offset(a->vk);
+
+ func(mop, vd_ofs, vj_ofs, vk_ofs, 16, ctx->vl/8);
+ return true;
+}
+
+static bool gvec_vv(DisasContext *ctx, arg_vv *a, MemOp mop,
+ void (*func)(unsigned, uint32_t, uint32_t,
+ uint32_t, uint32_t))
+{
+ uint32_t vd_ofs, vj_ofs;
+
+ CHECK_SXE;
+
+ vd_ofs = vec_full_offset(a->vd);
+ vj_ofs = vec_full_offset(a->vj);
+
+ func(mop, vd_ofs, vj_ofs, 16, ctx->vl/8);
+ return true;
+}
+
+static bool gvec_vv_i(DisasContext *ctx, arg_vv_i *a, MemOp mop,
+ void (*func)(unsigned, uint32_t, uint32_t,
+ int64_t, uint32_t, uint32_t))
+{
+ uint32_t vd_ofs, vj_ofs;
+
+ CHECK_SXE;
+
+ vd_ofs = vec_full_offset(a->vd);
+ vj_ofs = vec_full_offset(a->vj);
+
+ func(mop, vd_ofs, vj_ofs, a->imm, 16, ctx->vl/8);
+ return true;
+}
+
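+/* vsubi.{b,h,w,d}u is expanded as a gvec addi of the negated immediate. */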
+static bool gvec_subi(DisasContext *ctx, arg_vv_i *a, MemOp mop)
+{
+ uint32_t vd_ofs, vj_ofs;
+
+ CHECK_SXE;
+
+ vd_ofs = vec_full_offset(a->vd);
+ vj_ofs = vec_full_offset(a->vj);
+
+ tcg_gen_gvec_addi(mop, vd_ofs, vj_ofs, -a->imm, 16, ctx->vl/8);
+ return true;
+}
+
+TRANS(vadd_b, gvec_vvv, MO_8, tcg_gen_gvec_add)
+TRANS(vadd_h, gvec_vvv, MO_16, tcg_gen_gvec_add)
+TRANS(vadd_w, gvec_vvv, MO_32, tcg_gen_gvec_add)
+TRANS(vadd_d, gvec_vvv, MO_64, tcg_gen_gvec_add)
+
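+/*
+ * vadd.q and vsub.q operate on the whole 128-bit register: the two 64-bit
+ * halves of each operand are fetched separately and combined with
+ * tcg_gen_add2/sub2_i64, which propagate the carry/borrow from the low
+ * double-word into the high one.
+ */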
+#define VADDSUB_Q(NAME) \
+static bool trans_v## NAME ##_q(DisasContext *ctx, arg_vvv *a) \
+{ \
+ TCGv_i64 rh, rl, ah, al, bh, bl; \
+ \
+ CHECK_SXE; \
+ \
+ rh = tcg_temp_new_i64(); \
+ rl = tcg_temp_new_i64(); \
+ ah = tcg_temp_new_i64(); \
+ al = tcg_temp_new_i64(); \
+ bh = tcg_temp_new_i64(); \
+ bl = tcg_temp_new_i64(); \
+ \
+ get_vreg64(ah, a->vj, 1); \
+ get_vreg64(al, a->vj, 0); \
+ get_vreg64(bh, a->vk, 1); \
+ get_vreg64(bl, a->vk, 0); \
+ \
+ tcg_gen_## NAME ##2_i64(rl, rh, al, ah, bl, bh); \
+ \
+ set_vreg64(rh, a->vd, 1); \
+ set_vreg64(rl, a->vd, 0); \
+ \
+ return true; \
+}
+
+VADDSUB_Q(add)
+VADDSUB_Q(sub)
+
+TRANS(vsub_b, gvec_vvv, MO_8, tcg_gen_gvec_sub)
+TRANS(vsub_h, gvec_vvv, MO_16, tcg_gen_gvec_sub)
+TRANS(vsub_w, gvec_vvv, MO_32, tcg_gen_gvec_sub)
+TRANS(vsub_d, gvec_vvv, MO_64, tcg_gen_gvec_sub)
+
+TRANS(vaddi_bu, gvec_vv_i, MO_8, tcg_gen_gvec_addi)
+TRANS(vaddi_hu, gvec_vv_i, MO_16, tcg_gen_gvec_addi)
+TRANS(vaddi_wu, gvec_vv_i, MO_32, tcg_gen_gvec_addi)
+TRANS(vaddi_du, gvec_vv_i, MO_64, tcg_gen_gvec_addi)
+TRANS(vsubi_bu, gvec_subi, MO_8)
+TRANS(vsubi_hu, gvec_subi, MO_16)
+TRANS(vsubi_wu, gvec_subi, MO_32)
+TRANS(vsubi_du, gvec_subi, MO_64)
+
+TRANS(vneg_b, gvec_vv, MO_8, tcg_gen_gvec_neg)
+TRANS(vneg_h, gvec_vv, MO_16, tcg_gen_gvec_neg)
+TRANS(vneg_w, gvec_vv, MO_32, tcg_gen_gvec_neg)
+TRANS(vneg_d, gvec_vv, MO_64, tcg_gen_gvec_neg)
+
+TRANS(vsadd_b, gvec_vvv, MO_8, tcg_gen_gvec_ssadd)
+TRANS(vsadd_h, gvec_vvv, MO_16, tcg_gen_gvec_ssadd)
+TRANS(vsadd_w, gvec_vvv, MO_32, tcg_gen_gvec_ssadd)
+TRANS(vsadd_d, gvec_vvv, MO_64, tcg_gen_gvec_ssadd)
+TRANS(vsadd_bu, gvec_vvv, MO_8, tcg_gen_gvec_usadd)
+TRANS(vsadd_hu, gvec_vvv, MO_16, tcg_gen_gvec_usadd)
+TRANS(vsadd_wu, gvec_vvv, MO_32, tcg_gen_gvec_usadd)
+TRANS(vsadd_du, gvec_vvv, MO_64, tcg_gen_gvec_usadd)
+TRANS(vssub_b, gvec_vvv, MO_8, tcg_gen_gvec_sssub)
+TRANS(vssub_h, gvec_vvv, MO_16, tcg_gen_gvec_sssub)
+TRANS(vssub_w, gvec_vvv, MO_32, tcg_gen_gvec_sssub)
+TRANS(vssub_d, gvec_vvv, MO_64, tcg_gen_gvec_sssub)
+TRANS(vssub_bu, gvec_vvv, MO_8, tcg_gen_gvec_ussub)
+TRANS(vssub_hu, gvec_vvv, MO_16, tcg_gen_gvec_ussub)
+TRANS(vssub_wu, gvec_vvv, MO_32, tcg_gen_gvec_ussub)
+TRANS(vssub_du, gvec_vvv, MO_64, tcg_gen_gvec_ussub)
+
+TRANS(vhaddw_h_b, gen_vvv, gen_helper_vhaddw_h_b)
+TRANS(vhaddw_w_h, gen_vvv, gen_helper_vhaddw_w_h)
+TRANS(vhaddw_d_w, gen_vvv, gen_helper_vhaddw_d_w)
+TRANS(vhaddw_q_d, gen_vvv, gen_helper_vhaddw_q_d)
+TRANS(vhaddw_hu_bu, gen_vvv, gen_helper_vhaddw_hu_bu)
+TRANS(vhaddw_wu_hu, gen_vvv, gen_helper_vhaddw_wu_hu)
+TRANS(vhaddw_du_wu, gen_vvv, gen_helper_vhaddw_du_wu)
+TRANS(vhaddw_qu_du, gen_vvv, gen_helper_vhaddw_qu_du)
+TRANS(vhsubw_h_b, gen_vvv, gen_helper_vhsubw_h_b)
+TRANS(vhsubw_w_h, gen_vvv, gen_helper_vhsubw_w_h)
+TRANS(vhsubw_d_w, gen_vvv, gen_helper_vhsubw_d_w)
+TRANS(vhsubw_q_d, gen_vvv, gen_helper_vhsubw_q_d)
+TRANS(vhsubw_hu_bu, gen_vvv, gen_helper_vhsubw_hu_bu)
+TRANS(vhsubw_wu_hu, gen_vvv, gen_helper_vhsubw_wu_hu)
+TRANS(vhsubw_du_wu, gen_vvv, gen_helper_vhsubw_du_wu)
+TRANS(vhsubw_qu_du, gen_vvv, gen_helper_vhsubw_qu_du)
+
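+/*
+ * For the "even" widening ops the source elements live in the low half of
+ * each double-width lane, so a shift left by half the lane width followed
+ * by an arithmetic shift right sign-extends them in place.
+ */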
+static void gen_vaddwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+ /* Sign-extend the even elements from a */
+ tcg_gen_shli_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t1, t1, halfbits);
+
+ /* Sign-extend the even elements from b */
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void gen_vaddwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16s_i32(t1, a);
+ tcg_gen_ext16s_i32(t2, b);
+ tcg_gen_add_i32(t, t1, t2);
+}
+
+static void gen_vaddwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(t1, a);
+ tcg_gen_ext32s_i64(t2, b);
+ tcg_gen_add_i64(t, t1, t2);
+}
+
+static void do_vaddwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vaddwev_s,
+ .fno = gen_helper_vaddwev_h_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vaddwev_w_h,
+ .fniv = gen_vaddwev_s,
+ .fno = gen_helper_vaddwev_w_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vaddwev_d_w,
+ .fniv = gen_vaddwev_s,
+ .fno = gen_helper_vaddwev_d_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vaddwev_q_d,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vaddwev_h_b, gvec_vvv, MO_8, do_vaddwev_s)
+TRANS(vaddwev_w_h, gvec_vvv, MO_16, do_vaddwev_s)
+TRANS(vaddwev_d_w, gvec_vvv, MO_32, do_vaddwev_s)
+TRANS(vaddwev_q_d, gvec_vvv, MO_64, do_vaddwev_s)
+
+static void gen_vaddwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_sari_i32(t1, a, 16);
+ tcg_gen_sari_i32(t2, b, 16);
+ tcg_gen_add_i32(t, t1, t2);
+}
+
+static void gen_vaddwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t1, a, 32);
+ tcg_gen_sari_i64(t2, b, 32);
+ tcg_gen_add_i64(t, t1, t2);
+}
+
+static void gen_vaddwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+ /* Sign-extend the odd elements of each vector */
+ tcg_gen_sari_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void do_vaddwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vaddwod_s,
+ .fno = gen_helper_vaddwod_h_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vaddwod_w_h,
+ .fniv = gen_vaddwod_s,
+ .fno = gen_helper_vaddwod_w_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vaddwod_d_w,
+ .fniv = gen_vaddwod_s,
+ .fno = gen_helper_vaddwod_d_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vaddwod_q_d,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vaddwod_h_b, gvec_vvv, MO_8, do_vaddwod_s)
+TRANS(vaddwod_w_h, gvec_vvv, MO_16, do_vaddwod_s)
+TRANS(vaddwod_d_w, gvec_vvv, MO_32, do_vaddwod_s)
+TRANS(vaddwod_q_d, gvec_vvv, MO_64, do_vaddwod_s)
+
+static void gen_vsubwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+ /* Sign-extend the even elements from a */
+ tcg_gen_shli_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t1, t1, halfbits);
+
+ /* Sign-extend the even elements from b */
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+
+ tcg_gen_sub_vec(vece, t, t1, t2);
+}
+
+static void gen_vsubwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16s_i32(t1, a);
+ tcg_gen_ext16s_i32(t2, b);
+ tcg_gen_sub_i32(t, t1, t2);
+}
+
+static void gen_vsubwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(t1, a);
+ tcg_gen_ext32s_i64(t2, b);
+ tcg_gen_sub_i64(t, t1, t2);
+}
+
+static void do_vsubwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vsubwev_s,
+ .fno = gen_helper_vsubwev_h_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vsubwev_w_h,
+ .fniv = gen_vsubwev_s,
+ .fno = gen_helper_vsubwev_w_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vsubwev_d_w,
+ .fniv = gen_vsubwev_s,
+ .fno = gen_helper_vsubwev_d_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vsubwev_q_d,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vsubwev_h_b, gvec_vvv, MO_8, do_vsubwev_s)
+TRANS(vsubwev_w_h, gvec_vvv, MO_16, do_vsubwev_s)
+TRANS(vsubwev_d_w, gvec_vvv, MO_32, do_vsubwev_s)
+TRANS(vsubwev_q_d, gvec_vvv, MO_64, do_vsubwev_s)
+
+static void gen_vsubwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+ /* Sign-extend the odd elements of each vector */
+ tcg_gen_sari_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+
+ tcg_gen_sub_vec(vece, t, t1, t2);
+}
+
+static void gen_vsubwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_sari_i32(t1, a, 16);
+ tcg_gen_sari_i32(t2, b, 16);
+ tcg_gen_sub_i32(t, t1, t2);
+}
+
+static void gen_vsubwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t1, a, 32);
+ tcg_gen_sari_i64(t2, b, 32);
+ tcg_gen_sub_i64(t, t1, t2);
+}
+
+static void do_vsubwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vsubwod_s,
+ .fno = gen_helper_vsubwod_h_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vsubwod_w_h,
+ .fniv = gen_vsubwod_s,
+ .fno = gen_helper_vsubwod_w_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vsubwod_d_w,
+ .fniv = gen_vsubwod_s,
+ .fno = gen_helper_vsubwod_d_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vsubwod_q_d,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vsubwod_h_b, gvec_vvv, MO_8, do_vsubwod_s)
+TRANS(vsubwod_w_h, gvec_vvv, MO_16, do_vsubwod_s)
+TRANS(vsubwod_d_w, gvec_vvv, MO_32, do_vsubwod_s)
+TRANS(vsubwod_q_d, gvec_vvv, MO_64, do_vsubwod_s)
+
+static void gen_vaddwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
+ tcg_gen_and_vec(vece, t1, a, t3);
+ tcg_gen_and_vec(vece, t2, b, t3);
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void gen_vaddwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(t1, a);
+ tcg_gen_ext16u_i32(t2, b);
+ tcg_gen_add_i32(t, t1, t2);
+}
+
+static void gen_vaddwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(t1, a);
+ tcg_gen_ext32u_i64(t2, b);
+ tcg_gen_add_i64(t, t1, t2);
+}
+
+static void do_vaddwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vaddwev_u,
+ .fno = gen_helper_vaddwev_h_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vaddwev_w_hu,
+ .fniv = gen_vaddwev_u,
+ .fno = gen_helper_vaddwev_w_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vaddwev_d_wu,
+ .fniv = gen_vaddwev_u,
+ .fno = gen_helper_vaddwev_d_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vaddwev_q_du,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vaddwev_h_bu, gvec_vvv, MO_8, do_vaddwev_u)
+TRANS(vaddwev_w_hu, gvec_vvv, MO_16, do_vaddwev_u)
+TRANS(vaddwev_d_wu, gvec_vvv, MO_32, do_vaddwev_u)
+TRANS(vaddwev_q_du, gvec_vvv, MO_64, do_vaddwev_u)
+
+static void gen_vaddwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+ /* Zero-extend the odd elements of each vector */
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ tcg_gen_shri_vec(vece, t2, b, halfbits);
+
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void gen_vaddwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t1, a, 16);
+ tcg_gen_shri_i32(t2, b, 16);
+ tcg_gen_add_i32(t, t1, t2);
+}
+
+static void gen_vaddwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t1, a, 32);
+ tcg_gen_shri_i64(t2, b, 32);
+ tcg_gen_add_i64(t, t1, t2);
+}
+
+static void do_vaddwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vaddwod_u,
+ .fno = gen_helper_vaddwod_h_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vaddwod_w_hu,
+ .fniv = gen_vaddwod_u,
+ .fno = gen_helper_vaddwod_w_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vaddwod_d_wu,
+ .fniv = gen_vaddwod_u,
+ .fno = gen_helper_vaddwod_d_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vaddwod_q_du,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vaddwod_h_bu, gvec_vvv, MO_8, do_vaddwod_u)
+TRANS(vaddwod_w_hu, gvec_vvv, MO_16, do_vaddwod_u)
+TRANS(vaddwod_d_wu, gvec_vvv, MO_32, do_vaddwod_u)
+TRANS(vaddwod_q_du, gvec_vvv, MO_64, do_vaddwod_u)
+
+static void gen_vsubwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
+ tcg_gen_and_vec(vece, t1, a, t3);
+ tcg_gen_and_vec(vece, t2, b, t3);
+ tcg_gen_sub_vec(vece, t, t1, t2);
+}
+
+static void gen_vsubwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(t1, a);
+ tcg_gen_ext16u_i32(t2, b);
+ tcg_gen_sub_i32(t, t1, t2);
+}
+
+static void gen_vsubwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(t1, a);
+ tcg_gen_ext32u_i64(t2, b);
+ tcg_gen_sub_i64(t, t1, t2);
+}
+
+static void do_vsubwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vsubwev_u,
+ .fno = gen_helper_vsubwev_h_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vsubwev_w_hu,
+ .fniv = gen_vsubwev_u,
+ .fno = gen_helper_vsubwev_w_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vsubwev_d_wu,
+ .fniv = gen_vsubwev_u,
+ .fno = gen_helper_vsubwev_d_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vsubwev_q_du,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vsubwev_h_bu, gvec_vvv, MO_8, do_vsubwev_u)
+TRANS(vsubwev_w_hu, gvec_vvv, MO_16, do_vsubwev_u)
+TRANS(vsubwev_d_wu, gvec_vvv, MO_32, do_vsubwev_u)
+TRANS(vsubwev_q_du, gvec_vvv, MO_64, do_vsubwev_u)
+
+static void gen_vsubwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+ /* Zero-extend the odd elements of each vector */
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ tcg_gen_shri_vec(vece, t2, b, halfbits);
+
+ tcg_gen_sub_vec(vece, t, t1, t2);
+}
+
+static void gen_vsubwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t1, a, 16);
+ tcg_gen_shri_i32(t2, b, 16);
+ tcg_gen_sub_i32(t, t1, t2);
+}
+
+static void gen_vsubwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t1, a, 32);
+ tcg_gen_shri_i64(t2, b, 32);
+ tcg_gen_sub_i64(t, t1, t2);
+}
+
+static void do_vsubwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vsubwod_u,
+ .fno = gen_helper_vsubwod_h_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vsubwod_w_hu,
+ .fniv = gen_vsubwod_u,
+ .fno = gen_helper_vsubwod_w_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vsubwod_d_wu,
+ .fniv = gen_vsubwod_u,
+ .fno = gen_helper_vsubwod_d_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vsubwod_q_du,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vsubwod_h_bu, gvec_vvv, MO_8, do_vsubwod_u)
+TRANS(vsubwod_w_hu, gvec_vvv, MO_16, do_vsubwod_u)
+TRANS(vsubwod_d_wu, gvec_vvv, MO_32, do_vsubwod_u)
+TRANS(vsubwod_q_du, gvec_vvv, MO_64, do_vsubwod_u)
+
+static void gen_vaddwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, halfbits));
+
+ /* Zero-extend the even elements from a */
+ tcg_gen_and_vec(vece, t1, a, t3);
+
+ /* Sign-extend the even elements from b */
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void gen_vaddwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(t1, a);
+ tcg_gen_ext16s_i32(t2, b);
+ tcg_gen_add_i32(t, t1, t2);
+}
+
+static void gen_vaddwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(t1, a);
+ tcg_gen_ext32s_i64(t2, b);
+ tcg_gen_add_i64(t, t1, t2);
+}
+
+static void do_vaddwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vaddwev_u_s,
+ .fno = gen_helper_vaddwev_h_bu_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vaddwev_w_hu_h,
+ .fniv = gen_vaddwev_u_s,
+ .fno = gen_helper_vaddwev_w_hu_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vaddwev_d_wu_w,
+ .fniv = gen_vaddwev_u_s,
+ .fno = gen_helper_vaddwev_d_wu_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vaddwev_q_du_d,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vaddwev_h_bu_b, gvec_vvv, MO_8, do_vaddwev_u_s)
+TRANS(vaddwev_w_hu_h, gvec_vvv, MO_16, do_vaddwev_u_s)
+TRANS(vaddwev_d_wu_w, gvec_vvv, MO_32, do_vaddwev_u_s)
+TRANS(vaddwev_q_du_d, gvec_vvv, MO_64, do_vaddwev_u_s)
+
+static void gen_vaddwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+ /* Zero-extend the odd elements from a */
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ /* Sign-extend the odd elements from b */
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void gen_vaddwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t1, a, 16);
+ tcg_gen_sari_i32(t2, b, 16);
+ tcg_gen_add_i32(t, t1, t2);
+}
+
+static void gen_vaddwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t1, a, 32);
+ tcg_gen_sari_i64(t2, b, 32);
+ tcg_gen_add_i64(t, t1, t2);
+}
+
+static void do_vaddwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vaddwod_u_s,
+ .fno = gen_helper_vaddwod_h_bu_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vaddwod_w_hu_h,
+ .fniv = gen_vaddwod_u_s,
+ .fno = gen_helper_vaddwod_w_hu_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vaddwod_d_wu_w,
+ .fniv = gen_vaddwod_u_s,
+ .fno = gen_helper_vaddwod_d_wu_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vaddwod_q_du_d,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vaddwod_h_bu_b, gvec_vvv, MO_8, do_vaddwod_u_s)
+TRANS(vaddwod_w_hu_h, gvec_vvv, MO_16, do_vaddwod_u_s)
+TRANS(vaddwod_d_wu_w, gvec_vvv, MO_32, do_vaddwod_u_s)
+TRANS(vaddwod_q_du_d, gvec_vvv, MO_64, do_vaddwod_u_s)
+
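+/*
+ * Overflow-safe averaging:
+ *   a + b == 2*(a >> 1) + 2*(b >> 1) + (a & 1) + (b & 1)
+ * so the truncating average is (a >> 1) + (b >> 1) + ((a & b) & 1) and the
+ * rounding average is (a >> 1) + (b >> 1) + ((a | b) & 1).  gen_round_vec
+ * selects AND or OR accordingly, gen_shr_vec the signed or unsigned shift.
+ */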
+static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
+ void (*gen_shr_vec)(unsigned, TCGv_vec,
+ TCGv_vec, int64_t),
+ void (*gen_round_vec)(unsigned, TCGv_vec,
+ TCGv_vec, TCGv_vec))
+{
+ TCGv_vec tmp = tcg_temp_new_vec_matching(t);
+ gen_round_vec(vece, tmp, a, b);
+ tcg_gen_and_vec(vece, tmp, tmp, tcg_constant_vec_matching(t, vece, 1));
+ gen_shr_vec(vece, a, a, 1);
+ gen_shr_vec(vece, b, b, 1);
+ tcg_gen_add_vec(vece, t, a, b);
+ tcg_gen_add_vec(vece, t, t, tmp);
+}
+
+static void gen_vavg_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vavg(vece, t, a, b, tcg_gen_sari_vec, tcg_gen_and_vec);
+}
+
+static void gen_vavg_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vavg(vece, t, a, b, tcg_gen_shri_vec, tcg_gen_and_vec);
+}
+
+static void gen_vavgr_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vavg(vece, t, a, b, tcg_gen_sari_vec, tcg_gen_or_vec);
+}
+
+static void gen_vavgr_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vavg(vece, t, a, b, tcg_gen_shri_vec, tcg_gen_or_vec);
+}
+
+static void do_vavg_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vavg_s,
+ .fno = gen_helper_vavg_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vavg_s,
+ .fno = gen_helper_vavg_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vavg_s,
+ .fno = gen_helper_vavg_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vavg_s,
+ .fno = gen_helper_vavg_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+static void do_vavg_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vavg_u,
+ .fno = gen_helper_vavg_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vavg_u,
+ .fno = gen_helper_vavg_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vavg_u,
+ .fno = gen_helper_vavg_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vavg_u,
+ .fno = gen_helper_vavg_du,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vavg_b, gvec_vvv, MO_8, do_vavg_s)
+TRANS(vavg_h, gvec_vvv, MO_16, do_vavg_s)
+TRANS(vavg_w, gvec_vvv, MO_32, do_vavg_s)
+TRANS(vavg_d, gvec_vvv, MO_64, do_vavg_s)
+TRANS(vavg_bu, gvec_vvv, MO_8, do_vavg_u)
+TRANS(vavg_hu, gvec_vvv, MO_16, do_vavg_u)
+TRANS(vavg_wu, gvec_vvv, MO_32, do_vavg_u)
+TRANS(vavg_du, gvec_vvv, MO_64, do_vavg_u)
+
+static void do_vavgr_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vavgr_s,
+ .fno = gen_helper_vavgr_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vavgr_s,
+ .fno = gen_helper_vavgr_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vavgr_s,
+ .fno = gen_helper_vavgr_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vavgr_s,
+ .fno = gen_helper_vavgr_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+static void do_vavgr_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vavgr_u,
+ .fno = gen_helper_vavgr_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vavgr_u,
+ .fno = gen_helper_vavgr_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vavgr_u,
+ .fno = gen_helper_vavgr_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vavgr_u,
+ .fno = gen_helper_vavgr_du,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vavgr_b, gvec_vvv, MO_8, do_vavgr_s)
+TRANS(vavgr_h, gvec_vvv, MO_16, do_vavgr_s)
+TRANS(vavgr_w, gvec_vvv, MO_32, do_vavgr_s)
+TRANS(vavgr_d, gvec_vvv, MO_64, do_vavgr_s)
+TRANS(vavgr_bu, gvec_vvv, MO_8, do_vavgr_u)
+TRANS(vavgr_hu, gvec_vvv, MO_16, do_vavgr_u)
+TRANS(vavgr_wu, gvec_vvv, MO_32, do_vavgr_u)
+TRANS(vavgr_du, gvec_vvv, MO_64, do_vavgr_u)
+
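+/* Absolute difference computed as max(a, b) - min(a, b). */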
+static void gen_vabsd_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_smax_vec(vece, t, a, b);
+ tcg_gen_smin_vec(vece, a, a, b);
+ tcg_gen_sub_vec(vece, t, t, a);
+}
+
+static void do_vabsd_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_smax_vec, INDEX_op_smin_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vabsd_s,
+ .fno = gen_helper_vabsd_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vabsd_s,
+ .fno = gen_helper_vabsd_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vabsd_s,
+ .fno = gen_helper_vabsd_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vabsd_s,
+ .fno = gen_helper_vabsd_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+static void gen_vabsd_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_umax_vec(vece, t, a, b);
+ tcg_gen_umin_vec(vece, a, a, b);
+ tcg_gen_sub_vec(vece, t, t, a);
+}
+
+static void do_vabsd_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_umax_vec, INDEX_op_umin_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vabsd_u,
+ .fno = gen_helper_vabsd_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vabsd_u,
+ .fno = gen_helper_vabsd_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vabsd_u,
+ .fno = gen_helper_vabsd_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vabsd_u,
+ .fno = gen_helper_vabsd_du,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vabsd_b, gvec_vvv, MO_8, do_vabsd_s)
+TRANS(vabsd_h, gvec_vvv, MO_16, do_vabsd_s)
+TRANS(vabsd_w, gvec_vvv, MO_32, do_vabsd_s)
+TRANS(vabsd_d, gvec_vvv, MO_64, do_vabsd_s)
+TRANS(vabsd_bu, gvec_vvv, MO_8, do_vabsd_u)
+TRANS(vabsd_hu, gvec_vvv, MO_16, do_vabsd_u)
+TRANS(vabsd_wu, gvec_vvv, MO_32, do_vabsd_u)
+TRANS(vabsd_du, gvec_vvv, MO_64, do_vabsd_u)
+
+static void gen_vadda(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+ tcg_gen_abs_vec(vece, t1, a);
+ tcg_gen_abs_vec(vece, t2, b);
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void do_vadda(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_abs_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vadda,
+ .fno = gen_helper_vadda_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vadda,
+ .fno = gen_helper_vadda_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vadda,
+ .fno = gen_helper_vadda_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vadda,
+ .fno = gen_helper_vadda_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vadda_b, gvec_vvv, MO_8, do_vadda)
+TRANS(vadda_h, gvec_vvv, MO_16, do_vadda)
+TRANS(vadda_w, gvec_vvv, MO_32, do_vadda)
+TRANS(vadda_d, gvec_vvv, MO_64, do_vadda)
+
+TRANS(vmax_b, gvec_vvv, MO_8, tcg_gen_gvec_smax)
+TRANS(vmax_h, gvec_vvv, MO_16, tcg_gen_gvec_smax)
+TRANS(vmax_w, gvec_vvv, MO_32, tcg_gen_gvec_smax)
+TRANS(vmax_d, gvec_vvv, MO_64, tcg_gen_gvec_smax)
+TRANS(vmax_bu, gvec_vvv, MO_8, tcg_gen_gvec_umax)
+TRANS(vmax_hu, gvec_vvv, MO_16, tcg_gen_gvec_umax)
+TRANS(vmax_wu, gvec_vvv, MO_32, tcg_gen_gvec_umax)
+TRANS(vmax_du, gvec_vvv, MO_64, tcg_gen_gvec_umax)
+
+TRANS(vmin_b, gvec_vvv, MO_8, tcg_gen_gvec_smin)
+TRANS(vmin_h, gvec_vvv, MO_16, tcg_gen_gvec_smin)
+TRANS(vmin_w, gvec_vvv, MO_32, tcg_gen_gvec_smin)
+TRANS(vmin_d, gvec_vvv, MO_64, tcg_gen_gvec_smin)
+TRANS(vmin_bu, gvec_vvv, MO_8, tcg_gen_gvec_umin)
+TRANS(vmin_hu, gvec_vvv, MO_16, tcg_gen_gvec_umin)
+TRANS(vmin_wu, gvec_vvv, MO_32, tcg_gen_gvec_umin)
+TRANS(vmin_du, gvec_vvv, MO_64, tcg_gen_gvec_umin)
+
+static void gen_vmini_s(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ tcg_gen_smin_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+static void gen_vmini_u(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ tcg_gen_umin_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+static void gen_vmaxi_s(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ tcg_gen_smax_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+static void gen_vmaxi_u(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ tcg_gen_umax_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+static void do_vmini_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_smin_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vmini_s,
+ .fnoi = gen_helper_vmini_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vmini_s,
+ .fnoi = gen_helper_vmini_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vmini_s,
+ .fnoi = gen_helper_vmini_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vmini_s,
+ .fnoi = gen_helper_vmini_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+static void do_vmini_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vmini_u,
+ .fnoi = gen_helper_vmini_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vmini_u,
+ .fnoi = gen_helper_vmini_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vmini_u,
+ .fnoi = gen_helper_vmini_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vmini_u,
+ .fnoi = gen_helper_vmini_du,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+TRANS(vmini_b, gvec_vv_i, MO_8, do_vmini_s)
+TRANS(vmini_h, gvec_vv_i, MO_16, do_vmini_s)
+TRANS(vmini_w, gvec_vv_i, MO_32, do_vmini_s)
+TRANS(vmini_d, gvec_vv_i, MO_64, do_vmini_s)
+TRANS(vmini_bu, gvec_vv_i, MO_8, do_vmini_u)
+TRANS(vmini_hu, gvec_vv_i, MO_16, do_vmini_u)
+TRANS(vmini_wu, gvec_vv_i, MO_32, do_vmini_u)
+TRANS(vmini_du, gvec_vv_i, MO_64, do_vmini_u)
+
+static void do_vmaxi_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_smax_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vmaxi_s,
+ .fnoi = gen_helper_vmaxi_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vmaxi_s,
+ .fnoi = gen_helper_vmaxi_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vmaxi_s,
+ .fnoi = gen_helper_vmaxi_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vmaxi_s,
+ .fnoi = gen_helper_vmaxi_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+static void do_vmaxi_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_umax_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vmaxi_u,
+ .fnoi = gen_helper_vmaxi_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vmaxi_u,
+ .fnoi = gen_helper_vmaxi_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vmaxi_u,
+ .fnoi = gen_helper_vmaxi_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vmaxi_u,
+ .fnoi = gen_helper_vmaxi_du,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+TRANS(vmaxi_b, gvec_vv_i, MO_8, do_vmaxi_s)
+TRANS(vmaxi_h, gvec_vv_i, MO_16, do_vmaxi_s)
+TRANS(vmaxi_w, gvec_vv_i, MO_32, do_vmaxi_s)
+TRANS(vmaxi_d, gvec_vv_i, MO_64, do_vmaxi_s)
+TRANS(vmaxi_bu, gvec_vv_i, MO_8, do_vmaxi_u)
+TRANS(vmaxi_hu, gvec_vv_i, MO_16, do_vmaxi_u)
+TRANS(vmaxi_wu, gvec_vv_i, MO_32, do_vmaxi_u)
+TRANS(vmaxi_du, gvec_vv_i, MO_64, do_vmaxi_u)
+
+TRANS(vmul_b, gvec_vvv, MO_8, tcg_gen_gvec_mul)
+TRANS(vmul_h, gvec_vvv, MO_16, tcg_gen_gvec_mul)
+TRANS(vmul_w, gvec_vvv, MO_32, tcg_gen_gvec_mul)
+TRANS(vmul_d, gvec_vvv, MO_64, tcg_gen_gvec_mul)
+
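+/*
+ * vmuh returns the high half of the full product.  The 32- and 64-bit
+ * lanes use tcg_gen_muls2/mulu2 and simply discard the low half; the
+ * 8- and 16-bit lanes are left to the out-of-line helpers.
+ */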
+static void gen_vmuh_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 discard = tcg_temp_new_i32();
+ tcg_gen_muls2_i32(discard, t, a, b);
+}
+
+static void gen_vmuh_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 discard = tcg_temp_new_i64();
+ tcg_gen_muls2_i64(discard, t, a, b);
+}
+
+static void do_vmuh_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen3 op[4] = {
+ {
+ .fno = gen_helper_vmuh_b,
+ .vece = MO_8
+ },
+ {
+ .fno = gen_helper_vmuh_h,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmuh_w,
+ .fno = gen_helper_vmuh_w,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmuh_d,
+ .fno = gen_helper_vmuh_d,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmuh_b, gvec_vvv, MO_8, do_vmuh_s)
+TRANS(vmuh_h, gvec_vvv, MO_16, do_vmuh_s)
+TRANS(vmuh_w, gvec_vvv, MO_32, do_vmuh_s)
+TRANS(vmuh_d, gvec_vvv, MO_64, do_vmuh_s)
+
+static void gen_vmuh_wu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 discard = tcg_temp_new_i32();
+ tcg_gen_mulu2_i32(discard, t, a, b);
+}
+
+static void gen_vmuh_du(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 discard = tcg_temp_new_i64();
+ tcg_gen_mulu2_i64(discard, t, a, b);
+}
+
+static void do_vmuh_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen3 op[4] = {
+ {
+ .fno = gen_helper_vmuh_bu,
+ .vece = MO_8
+ },
+ {
+ .fno = gen_helper_vmuh_hu,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmuh_wu,
+ .fno = gen_helper_vmuh_wu,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmuh_du,
+ .fno = gen_helper_vmuh_du,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmuh_bu, gvec_vvv, MO_8, do_vmuh_u)
+TRANS(vmuh_hu, gvec_vvv, MO_16, do_vmuh_u)
+TRANS(vmuh_wu, gvec_vvv, MO_32, do_vmuh_u)
+TRANS(vmuh_du, gvec_vvv, MO_64, do_vmuh_u)
+
+static void gen_vmulwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ tcg_gen_shli_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t1, t1, halfbits);
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+ tcg_gen_mul_vec(vece, t, t1, t2);
+}
+
+static void gen_vmulwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16s_i32(t1, a);
+ tcg_gen_ext16s_i32(t2, b);
+ tcg_gen_mul_i32(t, t1, t2);
+}
+
+static void gen_vmulwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(t1, a);
+ tcg_gen_ext32s_i64(t2, b);
+ tcg_gen_mul_i64(t, t1, t2);
+}
+
+static void do_vmulwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_mul_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmulwev_s,
+ .fno = gen_helper_vmulwev_h_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmulwev_w_h,
+ .fniv = gen_vmulwev_s,
+ .fno = gen_helper_vmulwev_w_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmulwev_d_w,
+ .fniv = gen_vmulwev_s,
+ .fno = gen_helper_vmulwev_d_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmulwev_h_b, gvec_vvv, MO_8, do_vmulwev_s)
+TRANS(vmulwev_w_h, gvec_vvv, MO_16, do_vmulwev_s)
+TRANS(vmulwev_d_w, gvec_vvv, MO_32, do_vmulwev_s)
+
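+/* Unsigned x signed 128-bit multiply: mulsu2 with the operands swapped. */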
+static void tcg_gen_mulus2_i64(TCGv_i64 rl, TCGv_i64 rh,
+ TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ tcg_gen_mulsu2_i64(rl, rh, arg2, arg1);
+}
+
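+/*
+ * The *_q_d variants take a single 64-bit element from vj and vk
+ * (index 0 for the even form, 1 for the odd form) and store the full
+ * 128-bit product across both 64-bit lanes of vd.
+ */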
+#define VMUL_Q(NAME, FN, idx1, idx2) \
+static bool trans_## NAME (DisasContext *ctx, arg_vvv *a) \
+{ \
+ TCGv_i64 rh, rl, arg1, arg2; \
+ \
+ rh = tcg_temp_new_i64(); \
+ rl = tcg_temp_new_i64(); \
+ arg1 = tcg_temp_new_i64(); \
+ arg2 = tcg_temp_new_i64(); \
+ \
+ get_vreg64(arg1, a->vj, idx1); \
+ get_vreg64(arg2, a->vk, idx2); \
+ \
+ tcg_gen_## FN ##_i64(rl, rh, arg1, arg2); \
+ \
+ set_vreg64(rh, a->vd, 1); \
+ set_vreg64(rl, a->vd, 0); \
+ \
+ return true; \
+}
+
+VMUL_Q(vmulwev_q_d, muls2, 0, 0)
+VMUL_Q(vmulwod_q_d, muls2, 1, 1)
+VMUL_Q(vmulwev_q_du, mulu2, 0, 0)
+VMUL_Q(vmulwod_q_du, mulu2, 1, 1)
+VMUL_Q(vmulwev_q_du_d, mulus2, 0, 0)
+VMUL_Q(vmulwod_q_du_d, mulus2, 1, 1)
+
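+/*
+ * Widening multiply of the odd-numbered elements: the high half of each
+ * source element is extracted with a single arithmetic right shift.
+ */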
+static void gen_vmulwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ tcg_gen_sari_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+ tcg_gen_mul_vec(vece, t, t1, t2);
+}
+
+static void gen_vmulwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_sari_i32(t1, a, 16);
+ tcg_gen_sari_i32(t2, b, 16);
+ tcg_gen_mul_i32(t, t1, t2);
+}
+
+static void gen_vmulwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t1, a, 32);
+ tcg_gen_sari_i64(t2, b, 32);
+ tcg_gen_mul_i64(t, t1, t2);
+}
+
+static void do_vmulwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_mul_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmulwod_s,
+ .fno = gen_helper_vmulwod_h_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmulwod_w_h,
+ .fniv = gen_vmulwod_s,
+ .fno = gen_helper_vmulwod_w_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmulwod_d_w,
+ .fniv = gen_vmulwod_s,
+ .fno = gen_helper_vmulwod_d_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmulwod_h_b, gvec_vvv, MO_8, do_vmulwod_s)
+TRANS(vmulwod_w_h, gvec_vvv, MO_16, do_vmulwod_s)
+TRANS(vmulwod_d_w, gvec_vvv, MO_32, do_vmulwod_s)
+
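+/*
+ * Unsigned even-element widening multiply: the low half of each source
+ * element is isolated by masking instead of sign-extending.
+ */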
+static void gen_vmulwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, mask;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
+ tcg_gen_and_vec(vece, t1, a, mask);
+ tcg_gen_and_vec(vece, t2, b, mask);
+ tcg_gen_mul_vec(vece, t, t1, t2);
+}
+
+static void gen_vmulwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(t1, a);
+ tcg_gen_ext16u_i32(t2, b);
+ tcg_gen_mul_i32(t, t1, t2);
+}
+
+static void gen_vmulwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(t1, a);
+ tcg_gen_ext32u_i64(t2, b);
+ tcg_gen_mul_i64(t, t1, t2);
+}
+
+static void do_vmulwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_mul_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmulwev_u,
+ .fno = gen_helper_vmulwev_h_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmulwev_w_hu,
+ .fniv = gen_vmulwev_u,
+ .fno = gen_helper_vmulwev_w_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmulwev_d_wu,
+ .fniv = gen_vmulwev_u,
+ .fno = gen_helper_vmulwev_d_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmulwev_h_bu, gvec_vvv, MO_8, do_vmulwev_u)
+TRANS(vmulwev_w_hu, gvec_vvv, MO_16, do_vmulwev_u)
+TRANS(vmulwev_d_wu, gvec_vvv, MO_32, do_vmulwev_u)
+
+static void gen_vmulwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ tcg_gen_shri_vec(vece, t2, b, halfbits);
+ tcg_gen_mul_vec(vece, t, t1, t2);
+}
+
+static void gen_vmulwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t1, a, 16);
+ tcg_gen_shri_i32(t2, b, 16);
+ tcg_gen_mul_i32(t, t1, t2);
+}
+
+static void gen_vmulwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t1, a, 32);
+ tcg_gen_shri_i64(t2, b, 32);
+ tcg_gen_mul_i64(t, t1, t2);
+}
+
+static void do_vmulwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_mul_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmulwod_u,
+ .fno = gen_helper_vmulwod_h_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmulwod_w_hu,
+ .fniv = gen_vmulwod_u,
+ .fno = gen_helper_vmulwod_w_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmulwod_d_wu,
+ .fniv = gen_vmulwod_u,
+ .fno = gen_helper_vmulwod_d_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmulwod_h_bu, gvec_vvv, MO_8, do_vmulwod_u)
+TRANS(vmulwod_w_hu, gvec_vvv, MO_16, do_vmulwod_u)
+TRANS(vmulwod_d_wu, gvec_vvv, MO_32, do_vmulwod_u)
+
+static void gen_vmulwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, mask;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
+ tcg_gen_and_vec(vece, t1, a, mask);
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+ tcg_gen_mul_vec(vece, t, t1, t2);
+}
+
+static void gen_vmulwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(t1, a);
+ tcg_gen_ext16s_i32(t2, b);
+ tcg_gen_mul_i32(t, t1, t2);
+}
+
+static void gen_vmulwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(t1, a);
+ tcg_gen_ext32s_i64(t2, b);
+ tcg_gen_mul_i64(t, t1, t2);
+}
+
+static void do_vmulwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_mul_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmulwev_u_s,
+ .fno = gen_helper_vmulwev_h_bu_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmulwev_w_hu_h,
+ .fniv = gen_vmulwev_u_s,
+ .fno = gen_helper_vmulwev_w_hu_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmulwev_d_wu_w,
+ .fniv = gen_vmulwev_u_s,
+ .fno = gen_helper_vmulwev_d_wu_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmulwev_h_bu_b, gvec_vvv, MO_8, do_vmulwev_u_s)
+TRANS(vmulwev_w_hu_h, gvec_vvv, MO_16, do_vmulwev_u_s)
+TRANS(vmulwev_d_wu_w, gvec_vvv, MO_32, do_vmulwev_u_s)
+
+static void gen_vmulwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+ tcg_gen_mul_vec(vece, t, t1, t2);
+}
+
+static void gen_vmulwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t1, a, 16);
+ tcg_gen_sari_i32(t2, b, 16);
+ tcg_gen_mul_i32(t, t1, t2);
+}
+
+static void gen_vmulwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t1, a, 32);
+ tcg_gen_sari_i64(t2, b, 32);
+ tcg_gen_mul_i64(t, t1, t2);
+}
+
+static void do_vmulwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_mul_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmulwod_u_s,
+ .fno = gen_helper_vmulwod_h_bu_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmulwod_w_hu_h,
+ .fniv = gen_vmulwod_u_s,
+ .fno = gen_helper_vmulwod_w_hu_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmulwod_d_wu_w,
+ .fniv = gen_vmulwod_u_s,
+ .fno = gen_helper_vmulwod_d_wu_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmulwod_h_bu_b, gvec_vvv, MO_8, do_vmulwod_u_s)
+TRANS(vmulwod_w_hu_h, gvec_vvv, MO_16, do_vmulwod_u_s)
+TRANS(vmulwod_d_wu_w, gvec_vvv, MO_32, do_vmulwod_u_s)
+
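+/*
+ * Multiply-accumulate: the destination is both read and written, so the
+ * GVecGen3 descriptors below set load_dest.
+ */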
+static void gen_vmadd(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1;
+
+ t1 = tcg_temp_new_vec_matching(t);
+ tcg_gen_mul_vec(vece, t1, a, b);
+ tcg_gen_add_vec(vece, t, t, t1);
+}
+
+static void gen_vmadd_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ tcg_gen_mul_i32(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmadd_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ tcg_gen_mul_i64(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmadd(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vmadd,
+ .fno = gen_helper_vmadd_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vmadd,
+ .fno = gen_helper_vmadd_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmadd_w,
+ .fniv = gen_vmadd,
+ .fno = gen_helper_vmadd_w,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmadd_d,
+ .fniv = gen_vmadd,
+ .fno = gen_helper_vmadd_d,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmadd_b, gvec_vvv, MO_8, do_vmadd)
+TRANS(vmadd_h, gvec_vvv, MO_16, do_vmadd)
+TRANS(vmadd_w, gvec_vvv, MO_32, do_vmadd)
+TRANS(vmadd_d, gvec_vvv, MO_64, do_vmadd)
+
+static void gen_vmsub(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1;
+
+ t1 = tcg_temp_new_vec_matching(t);
+ tcg_gen_mul_vec(vece, t1, a, b);
+ tcg_gen_sub_vec(vece, t, t, t1);
+}
+
+static void gen_vmsub_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ tcg_gen_mul_i32(t1, a, b);
+ tcg_gen_sub_i32(t, t, t1);
+}
+
+static void gen_vmsub_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ tcg_gen_mul_i64(t1, a, b);
+ tcg_gen_sub_i64(t, t, t1);
+}
+
+static void do_vmsub(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_mul_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vmsub,
+ .fno = gen_helper_vmsub_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vmsub,
+ .fno = gen_helper_vmsub_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmsub_w,
+ .fniv = gen_vmsub,
+ .fno = gen_helper_vmsub_w,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmsub_d,
+ .fniv = gen_vmsub,
+ .fno = gen_helper_vmsub_d,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmsub_b, gvec_vvv, MO_8, do_vmsub)
+TRANS(vmsub_h, gvec_vvv, MO_16, do_vmsub)
+TRANS(vmsub_w, gvec_vvv, MO_32, do_vmsub)
+TRANS(vmsub_d, gvec_vvv, MO_64, do_vmsub)
+
+static void gen_vmaddwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_temp_new_vec_matching(t);
+ tcg_gen_shli_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t1, t1, halfbits);
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+ tcg_gen_mul_vec(vece, t3, t1, t2);
+ tcg_gen_add_vec(vece, t, t, t3);
+}
+
+static void gen_vmaddwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ gen_vmulwev_w_h(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmaddwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ gen_vmulwev_d_w(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmaddwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec,
+ INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmaddwev_s,
+ .fno = gen_helper_vmaddwev_h_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmaddwev_w_h,
+ .fniv = gen_vmaddwev_s,
+ .fno = gen_helper_vmaddwev_w_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmaddwev_d_w,
+ .fniv = gen_vmaddwev_s,
+ .fno = gen_helper_vmaddwev_d_w,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmaddwev_h_b, gvec_vvv, MO_8, do_vmaddwev_s)
+TRANS(vmaddwev_w_h, gvec_vvv, MO_16, do_vmaddwev_s)
+TRANS(vmaddwev_d_w, gvec_vvv, MO_32, do_vmaddwev_s)
+
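+/*
+ * 128-bit widening multiply-accumulate: the 128-bit product is added to
+ * the existing 128-bit value in vd with a double-word add2.
+ */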
+#define VMADD_Q(NAME, FN, idx1, idx2) \
+static bool trans_## NAME (DisasContext *ctx, arg_vvv *a) \
+{ \
+ TCGv_i64 rh, rl, arg1, arg2, th, tl; \
+ \
+ rh = tcg_temp_new_i64(); \
+ rl = tcg_temp_new_i64(); \
+ arg1 = tcg_temp_new_i64(); \
+ arg2 = tcg_temp_new_i64(); \
+ th = tcg_temp_new_i64(); \
+ tl = tcg_temp_new_i64(); \
+ \
+ get_vreg64(arg1, a->vj, idx1); \
+ get_vreg64(arg2, a->vk, idx2); \
+ get_vreg64(rh, a->vd, 1); \
+ get_vreg64(rl, a->vd, 0); \
+ \
+ tcg_gen_## FN ##_i64(tl, th, arg1, arg2); \
+ tcg_gen_add2_i64(rl, rh, rl, rh, tl, th); \
+ \
+ set_vreg64(rh, a->vd, 1); \
+ set_vreg64(rl, a->vd, 0); \
+ \
+ return true; \
+}
+
+VMADD_Q(vmaddwev_q_d, muls2, 0, 0)
+VMADD_Q(vmaddwod_q_d, muls2, 1, 1)
+VMADD_Q(vmaddwev_q_du, mulu2, 0, 0)
+VMADD_Q(vmaddwod_q_du, mulu2, 1, 1)
+VMADD_Q(vmaddwev_q_du_d, mulus2, 0, 0)
+VMADD_Q(vmaddwod_q_du_d, mulus2, 1, 1)
+
+static void gen_vmaddwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_temp_new_vec_matching(t);
+ tcg_gen_sari_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+ tcg_gen_mul_vec(vece, t3, t1, t2);
+ tcg_gen_add_vec(vece, t, t, t3);
+}
+
+static void gen_vmaddwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ gen_vmulwod_w_h(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmaddwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ gen_vmulwod_d_w(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmaddwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmaddwod_s,
+ .fno = gen_helper_vmaddwod_h_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmaddwod_w_h,
+ .fniv = gen_vmaddwod_s,
+ .fno = gen_helper_vmaddwod_w_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmaddwod_d_w,
+ .fniv = gen_vmaddwod_s,
+ .fno = gen_helper_vmaddwod_d_w,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmaddwod_h_b, gvec_vvv, MO_8, do_vmaddwod_s)
+TRANS(vmaddwod_w_h, gvec_vvv, MO_16, do_vmaddwod_s)
+TRANS(vmaddwod_d_w, gvec_vvv, MO_32, do_vmaddwod_s)
+
+static void gen_vmaddwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, mask;
+
+ t1 = tcg_temp_new_vec_matching(t);
+ t2 = tcg_temp_new_vec_matching(b);
+ mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
+ tcg_gen_and_vec(vece, t1, a, mask);
+ tcg_gen_and_vec(vece, t2, b, mask);
+ tcg_gen_mul_vec(vece, t1, t1, t2);
+ tcg_gen_add_vec(vece, t, t, t1);
+}
+
+static void gen_vmaddwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ gen_vmulwev_w_hu(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmaddwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ gen_vmulwev_d_wu(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmaddwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmaddwev_u,
+ .fno = gen_helper_vmaddwev_h_bu,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmaddwev_w_hu,
+ .fniv = gen_vmaddwev_u,
+ .fno = gen_helper_vmaddwev_w_hu,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmaddwev_d_wu,
+ .fniv = gen_vmaddwev_u,
+ .fno = gen_helper_vmaddwev_d_wu,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmaddwev_h_bu, gvec_vvv, MO_8, do_vmaddwev_u)
+TRANS(vmaddwev_w_hu, gvec_vvv, MO_16, do_vmaddwev_u)
+TRANS(vmaddwev_d_wu, gvec_vvv, MO_32, do_vmaddwev_u)
+
+static void gen_vmaddwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_temp_new_vec_matching(t);
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ tcg_gen_shri_vec(vece, t2, b, halfbits);
+ tcg_gen_mul_vec(vece, t3, t1, t2);
+ tcg_gen_add_vec(vece, t, t, t3);
+}
+
+static void gen_vmaddwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ gen_vmulwod_w_hu(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmaddwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ gen_vmulwod_d_wu(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmaddwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmaddwod_u,
+ .fno = gen_helper_vmaddwod_h_bu,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmaddwod_w_hu,
+ .fniv = gen_vmaddwod_u,
+ .fno = gen_helper_vmaddwod_w_hu,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmaddwod_d_wu,
+ .fniv = gen_vmaddwod_u,
+ .fno = gen_helper_vmaddwod_d_wu,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmaddwod_h_bu, gvec_vvv, MO_8, do_vmaddwod_u)
+TRANS(vmaddwod_w_hu, gvec_vvv, MO_16, do_vmaddwod_u)
+TRANS(vmaddwod_d_wu, gvec_vvv, MO_32, do_vmaddwod_u)
+
+static void gen_vmaddwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, mask;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
+ tcg_gen_and_vec(vece, t1, a, mask);
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+ tcg_gen_mul_vec(vece, t1, t1, t2);
+ tcg_gen_add_vec(vece, t, t, t1);
+}
+
+static void gen_vmaddwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ gen_vmulwev_w_hu_h(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmaddwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ gen_vmulwev_d_wu_w(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmaddwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec,
+ INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmaddwev_u_s,
+ .fno = gen_helper_vmaddwev_h_bu_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmaddwev_w_hu_h,
+ .fniv = gen_vmaddwev_u_s,
+ .fno = gen_helper_vmaddwev_w_hu_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmaddwev_d_wu_w,
+ .fniv = gen_vmaddwev_u_s,
+ .fno = gen_helper_vmaddwev_d_wu_w,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmaddwev_h_bu_b, gvec_vvv, MO_8, do_vmaddwev_u_s)
+TRANS(vmaddwev_w_hu_h, gvec_vvv, MO_16, do_vmaddwev_u_s)
+TRANS(vmaddwev_d_wu_w, gvec_vvv, MO_32, do_vmaddwev_u_s)
+
+static void gen_vmaddwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_temp_new_vec_matching(t);
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+ tcg_gen_mul_vec(vece, t3, t1, t2);
+ tcg_gen_add_vec(vece, t, t, t3);
+}
+
+static void gen_vmaddwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ gen_vmulwod_w_hu_h(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmaddwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ gen_vmulwod_d_wu_w(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmaddwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_sari_vec,
+ INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmaddwod_u_s,
+ .fno = gen_helper_vmaddwod_h_bu_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmaddwod_w_hu_h,
+ .fniv = gen_vmaddwod_u_s,
+ .fno = gen_helper_vmaddwod_w_hu_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmaddwod_d_wu_w,
+ .fniv = gen_vmaddwod_u_s,
+ .fno = gen_helper_vmaddwod_d_wu_w,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmaddwod_h_bu_b, gvec_vvv, MO_8, do_vmaddwod_u_s)
+TRANS(vmaddwod_w_hu_h, gvec_vvv, MO_16, do_vmaddwod_u_s)
+TRANS(vmaddwod_d_wu_w, gvec_vvv, MO_32, do_vmaddwod_u_s)
+
+TRANS(vdiv_b, gen_vvv, gen_helper_vdiv_b)
+TRANS(vdiv_h, gen_vvv, gen_helper_vdiv_h)
+TRANS(vdiv_w, gen_vvv, gen_helper_vdiv_w)
+TRANS(vdiv_d, gen_vvv, gen_helper_vdiv_d)
+TRANS(vdiv_bu, gen_vvv, gen_helper_vdiv_bu)
+TRANS(vdiv_hu, gen_vvv, gen_helper_vdiv_hu)
+TRANS(vdiv_wu, gen_vvv, gen_helper_vdiv_wu)
+TRANS(vdiv_du, gen_vvv, gen_helper_vdiv_du)
+TRANS(vmod_b, gen_vvv, gen_helper_vmod_b)
+TRANS(vmod_h, gen_vvv, gen_helper_vmod_h)
+TRANS(vmod_w, gen_vvv, gen_helper_vmod_w)
+TRANS(vmod_d, gen_vvv, gen_helper_vmod_d)
+TRANS(vmod_bu, gen_vvv, gen_helper_vmod_bu)
+TRANS(vmod_hu, gen_vvv, gen_helper_vmod_hu)
+TRANS(vmod_wu, gen_vvv, gen_helper_vmod_wu)
+TRANS(vmod_du, gen_vvv, gen_helper_vmod_du)
+
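+/*
+ * Signed saturation to [-2^imm, 2^imm - 1]: 'max' holds 2^imm - 1 and
+ * the lower bound is its bitwise NOT, i.e. -(2^imm).
+ */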
+static void gen_vsat_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec max)
+{
+ TCGv_vec min;
+
+ min = tcg_temp_new_vec_matching(t);
+ tcg_gen_not_vec(vece, min, max);
+ tcg_gen_smax_vec(vece, t, a, min);
+ tcg_gen_smin_vec(vece, t, t, max);
+}
+
+static void do_vsat_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_smax_vec, INDEX_op_smin_vec, 0
+ };
+ static const GVecGen2s op[4] = {
+ {
+ .fniv = gen_vsat_s,
+ .fno = gen_helper_vsat_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vsat_s,
+ .fno = gen_helper_vsat_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vsat_s,
+ .fno = gen_helper_vsat_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vsat_s,
+ .fno = gen_helper_vsat_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2s(vd_ofs, vj_ofs, oprsz, maxsz,
+                    tcg_constant_i64((1ll << imm) - 1), &op[vece]);
+}
+
+TRANS(vsat_b, gvec_vv_i, MO_8, do_vsat_s)
+TRANS(vsat_h, gvec_vv_i, MO_16, do_vsat_s)
+TRANS(vsat_w, gvec_vv_i, MO_32, do_vsat_s)
+TRANS(vsat_d, gvec_vv_i, MO_64, do_vsat_s)
+
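+/*
+ * Unsigned saturation clamps to 2^(imm+1) - 1 with umin; imm == 0x3f is
+ * special-cased below to avoid an undefined 64-bit shift.
+ */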
+static void gen_vsat_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec max)
+{
+ tcg_gen_umin_vec(vece, t, a, max);
+}
+
+static void do_vsat_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ uint64_t max;
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2s op[4] = {
+ {
+ .fniv = gen_vsat_u,
+ .fno = gen_helper_vsat_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vsat_u,
+ .fno = gen_helper_vsat_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vsat_u,
+ .fno = gen_helper_vsat_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vsat_u,
+ .fno = gen_helper_vsat_du,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ max = (imm == 0x3f) ? UINT64_MAX : (1ull << (imm + 1)) - 1;
+ tcg_gen_gvec_2s(vd_ofs, vj_ofs, oprsz, maxsz,
+ tcg_constant_i64(max), &op[vece]);
+}
+
+TRANS(vsat_bu, gvec_vv_i, MO_8, do_vsat_u)
+TRANS(vsat_hu, gvec_vv_i, MO_16, do_vsat_u)
+TRANS(vsat_wu, gvec_vv_i, MO_32, do_vsat_u)
+TRANS(vsat_du, gvec_vv_i, MO_64, do_vsat_u)
+
+TRANS(vexth_h_b, gen_vv, gen_helper_vexth_h_b)
+TRANS(vexth_w_h, gen_vv, gen_helper_vexth_w_h)
+TRANS(vexth_d_w, gen_vv, gen_helper_vexth_d_w)
+TRANS(vexth_q_d, gen_vv, gen_helper_vexth_q_d)
+TRANS(vexth_hu_bu, gen_vv, gen_helper_vexth_hu_bu)
+TRANS(vexth_wu_hu, gen_vv, gen_helper_vexth_wu_hu)
+TRANS(vexth_du_wu, gen_vv, gen_helper_vexth_du_wu)
+TRANS(vexth_qu_du, gen_vv, gen_helper_vexth_qu_du)
+
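+/*
+ * vsigncov: the result is 0 when the element of vj is zero, -vk when it
+ * is negative, and vk otherwise, built from neg plus two cmpsel ops.
+ */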
+static void gen_vsigncov(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, zero;
+
+ t1 = tcg_temp_new_vec_matching(t);
+ zero = tcg_constant_vec_matching(t, vece, 0);
+
+ tcg_gen_neg_vec(vece, t1, b);
+ tcg_gen_cmpsel_vec(TCG_COND_LT, vece, t, a, zero, t1, b);
+ tcg_gen_cmpsel_vec(TCG_COND_EQ, vece, t, a, zero, zero, t);
+}
+
+static void do_vsigncov(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_neg_vec, INDEX_op_cmpsel_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vsigncov,
+ .fno = gen_helper_vsigncov_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vsigncov,
+ .fno = gen_helper_vsigncov_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vsigncov,
+ .fno = gen_helper_vsigncov_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vsigncov,
+ .fno = gen_helper_vsigncov_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vsigncov_b, gvec_vvv, MO_8, do_vsigncov)
+TRANS(vsigncov_h, gvec_vvv, MO_16, do_vsigncov)
+TRANS(vsigncov_w, gvec_vvv, MO_32, do_vsigncov)
+TRANS(vsigncov_d, gvec_vvv, MO_64, do_vsigncov)
+
+TRANS(vmskltz_b, gen_vv, gen_helper_vmskltz_b)
+TRANS(vmskltz_h, gen_vv, gen_helper_vmskltz_h)
+TRANS(vmskltz_w, gen_vv, gen_helper_vmskltz_w)
+TRANS(vmskltz_d, gen_vv, gen_helper_vmskltz_d)
+TRANS(vmskgez_b, gen_vv, gen_helper_vmskgez_b)
+TRANS(vmsknz_b, gen_vv, gen_helper_vmsknz_b)
+
+#define EXPAND_BYTE(bit) ((uint64_t)(bit ? 0xff : 0))
+
+static uint64_t vldi_get_value(DisasContext *ctx, uint32_t imm)
+{
+ int mode;
+ uint64_t data, t;
+
+    /*
+     * imm bits [11:8] select the mode; valid modes are 0-12,
+     * all other values are invalid.
+     */
+ mode = (imm >> 8) & 0xf;
+ t = imm & 0xff;
+ switch (mode) {
+ case 0:
+ /* data: {2{24'0, imm[7:0]}} */
+        data = (t << 32) | t;
+ break;
+ case 1:
+ /* data: {2{16'0, imm[7:0], 8'0}} */
+        data = (t << 40) | (t << 8);
+ break;
+ case 2:
+ /* data: {2{8'0, imm[7:0], 16'0}} */
+ data = (t << 48) | (t << 16);
+ break;
+ case 3:
+ /* data: {2{imm[7:0], 24'0}} */
+ data = (t << 56) | (t << 24);
+ break;
+ case 4:
+ /* data: {4{8'0, imm[7:0]}} */
+ data = (t << 48) | (t << 32) | (t << 16) | t;
+ break;
+ case 5:
+ /* data: {4{imm[7:0], 8'0}} */
+        data = (t << 56) | (t << 40) | (t << 24) | (t << 8);
+ break;
+ case 6:
+ /* data: {2{16'0, imm[7:0], 8'1}} */
+ data = (t << 40) | ((uint64_t)0xff << 32) | (t << 8) | 0xff;
+ break;
+ case 7:
+ /* data: {2{8'0, imm[7:0], 16'1}} */
+ data = (t << 48) | ((uint64_t)0xffff << 32) | (t << 16) | 0xffff;
+ break;
+ case 8:
+ /* data: {8{imm[7:0]}} */
+        data = (t << 56) | (t << 48) | (t << 40) | (t << 32) |
+               (t << 24) | (t << 16) | (t << 8) | t;
+ break;
+ case 9:
+ /* data: {{8{imm[7]}, ..., 8{imm[0]}}} */
+ {
+            uint64_t b0, b1, b2, b3, b4, b5, b6, b7;
+            b0 = t & 0x1;
+ b1 = (t & 0x2) >> 1;
+ b2 = (t & 0x4) >> 2;
+ b3 = (t & 0x8) >> 3;
+ b4 = (t & 0x10) >> 4;
+ b5 = (t & 0x20) >> 5;
+ b6 = (t & 0x40) >> 6;
+ b7 = (t & 0x80) >> 7;
+ data = (EXPAND_BYTE(b7) << 56) |
+ (EXPAND_BYTE(b6) << 48) |
+ (EXPAND_BYTE(b5) << 40) |
+ (EXPAND_BYTE(b4) << 32) |
+ (EXPAND_BYTE(b3) << 24) |
+ (EXPAND_BYTE(b2) << 16) |
+ (EXPAND_BYTE(b1) << 8) |
+ EXPAND_BYTE(b0);
+ }
+ break;
+ case 10:
+ /* data: {2{imm[7], ~imm[6], {5{imm[6]}}, imm[5:0], 19'0}} */
+ {
+ uint64_t b6, b7;
+ uint64_t t0, t1;
+ b6 = (imm & 0x40) >> 6;
+ b7 = (imm & 0x80) >> 7;
+ t0 = (imm & 0x3f);
+            t1 = (b7 << 6) | ((1 - b6) << 5) | (uint64_t)(b6 ? 0x1f : 0);
+ data = (t1 << 57) | (t0 << 51) | (t1 << 25) | (t0 << 19);
+ }
+ break;
+ case 11:
+ /* data: {32'0, imm[7], ~{imm[6]}, 5{imm[6]}, imm[5:0], 19'0} */
+ {
+            uint64_t b6, b7;
+            uint64_t t0, t1;
+            b6 = (imm & 0x40) >> 6;
+            b7 = (imm & 0x80) >> 7;
+            t0 = (imm & 0x3f);
+            t1 = (b7 << 6) | ((1 - b6) << 5) | (b6 ? 0x1f : 0);
+ data = (t1 << 25) | (t0 << 19);
+ }
+ break;
+ case 12:
+ /* data: {imm[7], ~imm[6], 8{imm[6]}, imm[5:0], 48'0} */
+ {
+            uint64_t b6, b7;
+            uint64_t t0, t1;
+            b6 = (imm & 0x40) >> 6;
+            b7 = (imm & 0x80) >> 7;
+            t0 = (imm & 0x3f);
+            t1 = (b7 << 9) | ((1 - b6) << 8) | (b6 ? 0xff : 0);
+ data = (t1 << 54) | (t0 << 48);
+ }
+ break;
+ default:
+ generate_exception(ctx, EXCCODE_INE);
+ g_assert_not_reached();
+ }
+ return data;
+}
+
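+/*
+ * imm[12] selects between the pattern generator above (mode in
+ * imm[11:8], 64-bit elements) and a plain broadcast of the
+ * sign-extended imm[9:0] with the element size taken from imm[11:10].
+ */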
+static bool trans_vldi(DisasContext *ctx, arg_vldi *a)
+{
+ int sel, vece;
+ uint64_t value;
+ CHECK_SXE;
+
+ sel = (a->imm >> 12) & 0x1;
+
+ if (sel) {
+ value = vldi_get_value(ctx, a->imm);
+ vece = MO_64;
+ } else {
+ value = ((int32_t)(a->imm << 22)) >> 22;
+ vece = (a->imm >> 10) & 0x3;
+ }
+
+ tcg_gen_gvec_dup_i64(vece, vec_full_offset(a->vd), 16, ctx->vl/8,
+ tcg_constant_i64(value));
+ return true;
+}
+
+TRANS(vand_v, gvec_vvv, MO_64, tcg_gen_gvec_and)
+TRANS(vor_v, gvec_vvv, MO_64, tcg_gen_gvec_or)
+TRANS(vxor_v, gvec_vvv, MO_64, tcg_gen_gvec_xor)
+TRANS(vnor_v, gvec_vvv, MO_64, tcg_gen_gvec_nor)
+
+static bool trans_vandn_v(DisasContext *ctx, arg_vvv *a)
+{
+ uint32_t vd_ofs, vj_ofs, vk_ofs;
+
+ CHECK_SXE;
+
+ vd_ofs = vec_full_offset(a->vd);
+ vj_ofs = vec_full_offset(a->vj);
+ vk_ofs = vec_full_offset(a->vk);
+
+ tcg_gen_gvec_andc(MO_64, vd_ofs, vk_ofs, vj_ofs, 16, ctx->vl/8);
+ return true;
+}
+
+TRANS(vorn_v, gvec_vvv, MO_64, tcg_gen_gvec_orc)
+TRANS(vandi_b, gvec_vv_i, MO_8, tcg_gen_gvec_andi)
+TRANS(vori_b, gvec_vv_i, MO_8, tcg_gen_gvec_ori)
+TRANS(vxori_b, gvec_vv_i, MO_8, tcg_gen_gvec_xori)
+
+static void gen_vnori(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ TCGv_vec t1;
+
+ t1 = tcg_constant_vec_matching(t, vece, imm);
+ tcg_gen_nor_vec(vece, t, a, t1);
+}
+
+static void gen_vnori_b(TCGv_i64 t, TCGv_i64 a, int64_t imm)
+{
+ tcg_gen_movi_i64(t, dup_const(MO_8, imm));
+ tcg_gen_nor_i64(t, a, t);
+}
+
+static void do_vnori_b(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_nor_vec, 0
+ };
+ static const GVecGen2i op = {
+ .fni8 = gen_vnori_b,
+ .fniv = gen_vnori,
+ .fnoi = gen_helper_vnori_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op);
+}
+
+TRANS(vnori_b, gvec_vv_i, MO_8, do_vnori_b)
+
+TRANS(vsll_b, gvec_vvv, MO_8, tcg_gen_gvec_shlv)
+TRANS(vsll_h, gvec_vvv, MO_16, tcg_gen_gvec_shlv)
+TRANS(vsll_w, gvec_vvv, MO_32, tcg_gen_gvec_shlv)
+TRANS(vsll_d, gvec_vvv, MO_64, tcg_gen_gvec_shlv)
+TRANS(vslli_b, gvec_vv_i, MO_8, tcg_gen_gvec_shli)
+TRANS(vslli_h, gvec_vv_i, MO_16, tcg_gen_gvec_shli)
+TRANS(vslli_w, gvec_vv_i, MO_32, tcg_gen_gvec_shli)
+TRANS(vslli_d, gvec_vv_i, MO_64, tcg_gen_gvec_shli)
+
+TRANS(vsrl_b, gvec_vvv, MO_8, tcg_gen_gvec_shrv)
+TRANS(vsrl_h, gvec_vvv, MO_16, tcg_gen_gvec_shrv)
+TRANS(vsrl_w, gvec_vvv, MO_32, tcg_gen_gvec_shrv)
+TRANS(vsrl_d, gvec_vvv, MO_64, tcg_gen_gvec_shrv)
+TRANS(vsrli_b, gvec_vv_i, MO_8, tcg_gen_gvec_shri)
+TRANS(vsrli_h, gvec_vv_i, MO_16, tcg_gen_gvec_shri)
+TRANS(vsrli_w, gvec_vv_i, MO_32, tcg_gen_gvec_shri)
+TRANS(vsrli_d, gvec_vv_i, MO_64, tcg_gen_gvec_shri)
+
+TRANS(vsra_b, gvec_vvv, MO_8, tcg_gen_gvec_sarv)
+TRANS(vsra_h, gvec_vvv, MO_16, tcg_gen_gvec_sarv)
+TRANS(vsra_w, gvec_vvv, MO_32, tcg_gen_gvec_sarv)
+TRANS(vsra_d, gvec_vvv, MO_64, tcg_gen_gvec_sarv)
+TRANS(vsrai_b, gvec_vv_i, MO_8, tcg_gen_gvec_sari)
+TRANS(vsrai_h, gvec_vv_i, MO_16, tcg_gen_gvec_sari)
+TRANS(vsrai_w, gvec_vv_i, MO_32, tcg_gen_gvec_sari)
+TRANS(vsrai_d, gvec_vv_i, MO_64, tcg_gen_gvec_sari)
+
+TRANS(vrotr_b, gvec_vvv, MO_8, tcg_gen_gvec_rotrv)
+TRANS(vrotr_h, gvec_vvv, MO_16, tcg_gen_gvec_rotrv)
+TRANS(vrotr_w, gvec_vvv, MO_32, tcg_gen_gvec_rotrv)
+TRANS(vrotr_d, gvec_vvv, MO_64, tcg_gen_gvec_rotrv)
+TRANS(vrotri_b, gvec_vv_i, MO_8, tcg_gen_gvec_rotri)
+TRANS(vrotri_h, gvec_vv_i, MO_16, tcg_gen_gvec_rotri)
+TRANS(vrotri_w, gvec_vv_i, MO_32, tcg_gen_gvec_rotri)
+TRANS(vrotri_d, gvec_vv_i, MO_64, tcg_gen_gvec_rotri)
+
+TRANS(vsllwil_h_b, gen_vv_i, gen_helper_vsllwil_h_b)
+TRANS(vsllwil_w_h, gen_vv_i, gen_helper_vsllwil_w_h)
+TRANS(vsllwil_d_w, gen_vv_i, gen_helper_vsllwil_d_w)
+TRANS(vextl_q_d, gen_vv, gen_helper_vextl_q_d)
+TRANS(vsllwil_hu_bu, gen_vv_i, gen_helper_vsllwil_hu_bu)
+TRANS(vsllwil_wu_hu, gen_vv_i, gen_helper_vsllwil_wu_hu)
+TRANS(vsllwil_du_wu, gen_vv_i, gen_helper_vsllwil_du_wu)
+TRANS(vextl_qu_du, gen_vv, gen_helper_vextl_qu_du)
+
+TRANS(vsrlr_b, gen_vvv, gen_helper_vsrlr_b)
+TRANS(vsrlr_h, gen_vvv, gen_helper_vsrlr_h)
+TRANS(vsrlr_w, gen_vvv, gen_helper_vsrlr_w)
+TRANS(vsrlr_d, gen_vvv, gen_helper_vsrlr_d)
+TRANS(vsrlri_b, gen_vv_i, gen_helper_vsrlri_b)
+TRANS(vsrlri_h, gen_vv_i, gen_helper_vsrlri_h)
+TRANS(vsrlri_w, gen_vv_i, gen_helper_vsrlri_w)
+TRANS(vsrlri_d, gen_vv_i, gen_helper_vsrlri_d)
+
+TRANS(vsrar_b, gen_vvv, gen_helper_vsrar_b)
+TRANS(vsrar_h, gen_vvv, gen_helper_vsrar_h)
+TRANS(vsrar_w, gen_vvv, gen_helper_vsrar_w)
+TRANS(vsrar_d, gen_vvv, gen_helper_vsrar_d)
+TRANS(vsrari_b, gen_vv_i, gen_helper_vsrari_b)
+TRANS(vsrari_h, gen_vv_i, gen_helper_vsrari_h)
+TRANS(vsrari_w, gen_vv_i, gen_helper_vsrari_w)
+TRANS(vsrari_d, gen_vv_i, gen_helper_vsrari_d)
+
+TRANS(vsrln_b_h, gen_vvv, gen_helper_vsrln_b_h)
+TRANS(vsrln_h_w, gen_vvv, gen_helper_vsrln_h_w)
+TRANS(vsrln_w_d, gen_vvv, gen_helper_vsrln_w_d)
+TRANS(vsran_b_h, gen_vvv, gen_helper_vsran_b_h)
+TRANS(vsran_h_w, gen_vvv, gen_helper_vsran_h_w)
+TRANS(vsran_w_d, gen_vvv, gen_helper_vsran_w_d)
+
+TRANS(vsrlni_b_h, gen_vv_i, gen_helper_vsrlni_b_h)
+TRANS(vsrlni_h_w, gen_vv_i, gen_helper_vsrlni_h_w)
+TRANS(vsrlni_w_d, gen_vv_i, gen_helper_vsrlni_w_d)
+TRANS(vsrlni_d_q, gen_vv_i, gen_helper_vsrlni_d_q)
+TRANS(vsrani_b_h, gen_vv_i, gen_helper_vsrani_b_h)
+TRANS(vsrani_h_w, gen_vv_i, gen_helper_vsrani_h_w)
+TRANS(vsrani_w_d, gen_vv_i, gen_helper_vsrani_w_d)
+TRANS(vsrani_d_q, gen_vv_i, gen_helper_vsrani_d_q)
+
+TRANS(vsrlrn_b_h, gen_vvv, gen_helper_vsrlrn_b_h)
+TRANS(vsrlrn_h_w, gen_vvv, gen_helper_vsrlrn_h_w)
+TRANS(vsrlrn_w_d, gen_vvv, gen_helper_vsrlrn_w_d)
+TRANS(vsrarn_b_h, gen_vvv, gen_helper_vsrarn_b_h)
+TRANS(vsrarn_h_w, gen_vvv, gen_helper_vsrarn_h_w)
+TRANS(vsrarn_w_d, gen_vvv, gen_helper_vsrarn_w_d)
+
+TRANS(vsrlrni_b_h, gen_vv_i, gen_helper_vsrlrni_b_h)
+TRANS(vsrlrni_h_w, gen_vv_i, gen_helper_vsrlrni_h_w)
+TRANS(vsrlrni_w_d, gen_vv_i, gen_helper_vsrlrni_w_d)
+TRANS(vsrlrni_d_q, gen_vv_i, gen_helper_vsrlrni_d_q)
+TRANS(vsrarni_b_h, gen_vv_i, gen_helper_vsrarni_b_h)
+TRANS(vsrarni_h_w, gen_vv_i, gen_helper_vsrarni_h_w)
+TRANS(vsrarni_w_d, gen_vv_i, gen_helper_vsrarni_w_d)
+TRANS(vsrarni_d_q, gen_vv_i, gen_helper_vsrarni_d_q)
+
+TRANS(vssrln_b_h, gen_vvv, gen_helper_vssrln_b_h)
+TRANS(vssrln_h_w, gen_vvv, gen_helper_vssrln_h_w)
+TRANS(vssrln_w_d, gen_vvv, gen_helper_vssrln_w_d)
+TRANS(vssran_b_h, gen_vvv, gen_helper_vssran_b_h)
+TRANS(vssran_h_w, gen_vvv, gen_helper_vssran_h_w)
+TRANS(vssran_w_d, gen_vvv, gen_helper_vssran_w_d)
+TRANS(vssrln_bu_h, gen_vvv, gen_helper_vssrln_bu_h)
+TRANS(vssrln_hu_w, gen_vvv, gen_helper_vssrln_hu_w)
+TRANS(vssrln_wu_d, gen_vvv, gen_helper_vssrln_wu_d)
+TRANS(vssran_bu_h, gen_vvv, gen_helper_vssran_bu_h)
+TRANS(vssran_hu_w, gen_vvv, gen_helper_vssran_hu_w)
+TRANS(vssran_wu_d, gen_vvv, gen_helper_vssran_wu_d)
+
+TRANS(vssrlni_b_h, gen_vv_i, gen_helper_vssrlni_b_h)
+TRANS(vssrlni_h_w, gen_vv_i, gen_helper_vssrlni_h_w)
+TRANS(vssrlni_w_d, gen_vv_i, gen_helper_vssrlni_w_d)
+TRANS(vssrlni_d_q, gen_vv_i, gen_helper_vssrlni_d_q)
+TRANS(vssrani_b_h, gen_vv_i, gen_helper_vssrani_b_h)
+TRANS(vssrani_h_w, gen_vv_i, gen_helper_vssrani_h_w)
+TRANS(vssrani_w_d, gen_vv_i, gen_helper_vssrani_w_d)
+TRANS(vssrani_d_q, gen_vv_i, gen_helper_vssrani_d_q)
+TRANS(vssrlni_bu_h, gen_vv_i, gen_helper_vssrlni_bu_h)
+TRANS(vssrlni_hu_w, gen_vv_i, gen_helper_vssrlni_hu_w)
+TRANS(vssrlni_wu_d, gen_vv_i, gen_helper_vssrlni_wu_d)
+TRANS(vssrlni_du_q, gen_vv_i, gen_helper_vssrlni_du_q)
+TRANS(vssrani_bu_h, gen_vv_i, gen_helper_vssrani_bu_h)
+TRANS(vssrani_hu_w, gen_vv_i, gen_helper_vssrani_hu_w)
+TRANS(vssrani_wu_d, gen_vv_i, gen_helper_vssrani_wu_d)
+TRANS(vssrani_du_q, gen_vv_i, gen_helper_vssrani_du_q)
+
+TRANS(vssrlrn_b_h, gen_vvv, gen_helper_vssrlrn_b_h)
+TRANS(vssrlrn_h_w, gen_vvv, gen_helper_vssrlrn_h_w)
+TRANS(vssrlrn_w_d, gen_vvv, gen_helper_vssrlrn_w_d)
+TRANS(vssrarn_b_h, gen_vvv, gen_helper_vssrarn_b_h)
+TRANS(vssrarn_h_w, gen_vvv, gen_helper_vssrarn_h_w)
+TRANS(vssrarn_w_d, gen_vvv, gen_helper_vssrarn_w_d)
+TRANS(vssrlrn_bu_h, gen_vvv, gen_helper_vssrlrn_bu_h)
+TRANS(vssrlrn_hu_w, gen_vvv, gen_helper_vssrlrn_hu_w)
+TRANS(vssrlrn_wu_d, gen_vvv, gen_helper_vssrlrn_wu_d)
+TRANS(vssrarn_bu_h, gen_vvv, gen_helper_vssrarn_bu_h)
+TRANS(vssrarn_hu_w, gen_vvv, gen_helper_vssrarn_hu_w)
+TRANS(vssrarn_wu_d, gen_vvv, gen_helper_vssrarn_wu_d)
+
+TRANS(vssrlrni_b_h, gen_vv_i, gen_helper_vssrlrni_b_h)
+TRANS(vssrlrni_h_w, gen_vv_i, gen_helper_vssrlrni_h_w)
+TRANS(vssrlrni_w_d, gen_vv_i, gen_helper_vssrlrni_w_d)
+TRANS(vssrlrni_d_q, gen_vv_i, gen_helper_vssrlrni_d_q)
+TRANS(vssrarni_b_h, gen_vv_i, gen_helper_vssrarni_b_h)
+TRANS(vssrarni_h_w, gen_vv_i, gen_helper_vssrarni_h_w)
+TRANS(vssrarni_w_d, gen_vv_i, gen_helper_vssrarni_w_d)
+TRANS(vssrarni_d_q, gen_vv_i, gen_helper_vssrarni_d_q)
+TRANS(vssrlrni_bu_h, gen_vv_i, gen_helper_vssrlrni_bu_h)
+TRANS(vssrlrni_hu_w, gen_vv_i, gen_helper_vssrlrni_hu_w)
+TRANS(vssrlrni_wu_d, gen_vv_i, gen_helper_vssrlrni_wu_d)
+TRANS(vssrlrni_du_q, gen_vv_i, gen_helper_vssrlrni_du_q)
+TRANS(vssrarni_bu_h, gen_vv_i, gen_helper_vssrarni_bu_h)
+TRANS(vssrarni_hu_w, gen_vv_i, gen_helper_vssrarni_hu_w)
+TRANS(vssrarni_wu_d, gen_vv_i, gen_helper_vssrarni_wu_d)
+TRANS(vssrarni_du_q, gen_vv_i, gen_helper_vssrarni_du_q)
+
+TRANS(vclo_b, gen_vv, gen_helper_vclo_b)
+TRANS(vclo_h, gen_vv, gen_helper_vclo_h)
+TRANS(vclo_w, gen_vv, gen_helper_vclo_w)
+TRANS(vclo_d, gen_vv, gen_helper_vclo_d)
+TRANS(vclz_b, gen_vv, gen_helper_vclz_b)
+TRANS(vclz_h, gen_vv, gen_helper_vclz_h)
+TRANS(vclz_w, gen_vv, gen_helper_vclz_w)
+TRANS(vclz_d, gen_vv, gen_helper_vclz_d)
+
+TRANS(vpcnt_b, gen_vv, gen_helper_vpcnt_b)
+TRANS(vpcnt_h, gen_vv, gen_helper_vpcnt_h)
+TRANS(vpcnt_w, gen_vv, gen_helper_vpcnt_w)
+TRANS(vpcnt_d, gen_vv, gen_helper_vpcnt_d)
+
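+/*
+ * Common expansion for vbitclr/vbitset/vbitrev: build 1 << (b mod the
+ * element width), then clear/set/flip that bit with andc/or/xor.
+ */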
+static void do_vbit(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
+ void (*func)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
+{
+ TCGv_vec mask, lsh, t1, one;
+
+ lsh = tcg_temp_new_vec_matching(t);
+ t1 = tcg_temp_new_vec_matching(t);
+ mask = tcg_constant_vec_matching(t, vece, (8 << vece) - 1);
+ one = tcg_constant_vec_matching(t, vece, 1);
+
+ tcg_gen_and_vec(vece, lsh, b, mask);
+ tcg_gen_shlv_vec(vece, t1, one, lsh);
+ func(vece, t, a, t1);
+}
+
+static void gen_vbitclr(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vbit(vece, t, a, b, tcg_gen_andc_vec);
+}
+
+static void gen_vbitset(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vbit(vece, t, a, b, tcg_gen_or_vec);
+}
+
+static void gen_vbitrev(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vbit(vece, t, a, b, tcg_gen_xor_vec);
+}
+
+static void do_vbitclr(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shlv_vec, INDEX_op_andc_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vbitclr,
+ .fno = gen_helper_vbitclr_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vbitclr,
+ .fno = gen_helper_vbitclr_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vbitclr,
+ .fno = gen_helper_vbitclr_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vbitclr,
+ .fno = gen_helper_vbitclr_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vbitclr_b, gvec_vvv, MO_8, do_vbitclr)
+TRANS(vbitclr_h, gvec_vvv, MO_16, do_vbitclr)
+TRANS(vbitclr_w, gvec_vvv, MO_32, do_vbitclr)
+TRANS(vbitclr_d, gvec_vvv, MO_64, do_vbitclr)
+
+static void do_vbiti(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm,
+ void (*func)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
+{
+ int lsh;
+ TCGv_vec t1, one;
+
+ lsh = imm & ((8 << vece) -1);
+ t1 = tcg_temp_new_vec_matching(t);
+ one = tcg_constant_vec_matching(t, vece, 1);
+
+ tcg_gen_shli_vec(vece, t1, one, lsh);
+ func(vece, t, a, t1);
+}
+
+static void gen_vbitclri(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_vbiti(vece, t, a, imm, tcg_gen_andc_vec);
+}
+
+static void gen_vbitseti(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_vbiti(vece, t, a, imm, tcg_gen_or_vec);
+}
+
+static void gen_vbitrevi(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_vbiti(vece, t, a, imm, tcg_gen_xor_vec);
+}
+
+static void do_vbitclri(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_andc_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vbitclri,
+ .fnoi = gen_helper_vbitclri_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vbitclri,
+ .fnoi = gen_helper_vbitclri_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vbitclri,
+ .fnoi = gen_helper_vbitclri_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vbitclri,
+ .fnoi = gen_helper_vbitclri_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+TRANS(vbitclri_b, gvec_vv_i, MO_8, do_vbitclri)
+TRANS(vbitclri_h, gvec_vv_i, MO_16, do_vbitclri)
+TRANS(vbitclri_w, gvec_vv_i, MO_32, do_vbitclri)
+TRANS(vbitclri_d, gvec_vv_i, MO_64, do_vbitclri)
+
+static void do_vbitset(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shlv_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vbitset,
+ .fno = gen_helper_vbitset_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vbitset,
+ .fno = gen_helper_vbitset_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vbitset,
+ .fno = gen_helper_vbitset_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vbitset,
+ .fno = gen_helper_vbitset_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vbitset_b, gvec_vvv, MO_8, do_vbitset)
+TRANS(vbitset_h, gvec_vvv, MO_16, do_vbitset)
+TRANS(vbitset_w, gvec_vvv, MO_32, do_vbitset)
+TRANS(vbitset_d, gvec_vvv, MO_64, do_vbitset)
+
+static void do_vbitseti(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vbitseti,
+ .fnoi = gen_helper_vbitseti_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vbitseti,
+ .fnoi = gen_helper_vbitseti_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vbitseti,
+ .fnoi = gen_helper_vbitseti_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vbitseti,
+ .fnoi = gen_helper_vbitseti_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+TRANS(vbitseti_b, gvec_vv_i, MO_8, do_vbitseti)
+TRANS(vbitseti_h, gvec_vv_i, MO_16, do_vbitseti)
+TRANS(vbitseti_w, gvec_vv_i, MO_32, do_vbitseti)
+TRANS(vbitseti_d, gvec_vv_i, MO_64, do_vbitseti)
+
+static void do_vbitrev(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shlv_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vbitrev,
+ .fno = gen_helper_vbitrev_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vbitrev,
+ .fno = gen_helper_vbitrev_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vbitrev,
+ .fno = gen_helper_vbitrev_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vbitrev,
+ .fno = gen_helper_vbitrev_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vbitrev_b, gvec_vvv, MO_8, do_vbitrev)
+TRANS(vbitrev_h, gvec_vvv, MO_16, do_vbitrev)
+TRANS(vbitrev_w, gvec_vvv, MO_32, do_vbitrev)
+TRANS(vbitrev_d, gvec_vvv, MO_64, do_vbitrev)
+
+static void do_vbitrevi(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vbitrevi,
+ .fnoi = gen_helper_vbitrevi_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vbitrevi,
+ .fnoi = gen_helper_vbitrevi_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vbitrevi,
+ .fnoi = gen_helper_vbitrevi_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vbitrevi,
+ .fnoi = gen_helper_vbitrevi_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+TRANS(vbitrevi_b, gvec_vv_i, MO_8, do_vbitrevi)
+TRANS(vbitrevi_h, gvec_vv_i, MO_16, do_vbitrevi)
+TRANS(vbitrevi_w, gvec_vv_i, MO_32, do_vbitrevi)
+TRANS(vbitrevi_d, gvec_vv_i, MO_64, do_vbitrevi)
+
+TRANS(vfrstp_b, gen_vvv, gen_helper_vfrstp_b)
+TRANS(vfrstp_h, gen_vvv, gen_helper_vfrstp_h)
+TRANS(vfrstpi_b, gen_vv_i, gen_helper_vfrstpi_b)
+TRANS(vfrstpi_h, gen_vv_i, gen_helper_vfrstpi_h)
+
+TRANS(vfadd_s, gen_vvv, gen_helper_vfadd_s)
+TRANS(vfadd_d, gen_vvv, gen_helper_vfadd_d)
+TRANS(vfsub_s, gen_vvv, gen_helper_vfsub_s)
+TRANS(vfsub_d, gen_vvv, gen_helper_vfsub_d)
+TRANS(vfmul_s, gen_vvv, gen_helper_vfmul_s)
+TRANS(vfmul_d, gen_vvv, gen_helper_vfmul_d)
+TRANS(vfdiv_s, gen_vvv, gen_helper_vfdiv_s)
+TRANS(vfdiv_d, gen_vvv, gen_helper_vfdiv_d)
+
+TRANS(vfmadd_s, gen_vvvv, gen_helper_vfmadd_s)
+TRANS(vfmadd_d, gen_vvvv, gen_helper_vfmadd_d)
+TRANS(vfmsub_s, gen_vvvv, gen_helper_vfmsub_s)
+TRANS(vfmsub_d, gen_vvvv, gen_helper_vfmsub_d)
+TRANS(vfnmadd_s, gen_vvvv, gen_helper_vfnmadd_s)
+TRANS(vfnmadd_d, gen_vvvv, gen_helper_vfnmadd_d)
+TRANS(vfnmsub_s, gen_vvvv, gen_helper_vfnmsub_s)
+TRANS(vfnmsub_d, gen_vvvv, gen_helper_vfnmsub_d)
+
+TRANS(vfmax_s, gen_vvv, gen_helper_vfmax_s)
+TRANS(vfmax_d, gen_vvv, gen_helper_vfmax_d)
+TRANS(vfmin_s, gen_vvv, gen_helper_vfmin_s)
+TRANS(vfmin_d, gen_vvv, gen_helper_vfmin_d)
+
+TRANS(vfmaxa_s, gen_vvv, gen_helper_vfmaxa_s)
+TRANS(vfmaxa_d, gen_vvv, gen_helper_vfmaxa_d)
+TRANS(vfmina_s, gen_vvv, gen_helper_vfmina_s)
+TRANS(vfmina_d, gen_vvv, gen_helper_vfmina_d)
+
+TRANS(vflogb_s, gen_vv, gen_helper_vflogb_s)
+TRANS(vflogb_d, gen_vv, gen_helper_vflogb_d)
+
+TRANS(vfclass_s, gen_vv, gen_helper_vfclass_s)
+TRANS(vfclass_d, gen_vv, gen_helper_vfclass_d)
+
+TRANS(vfsqrt_s, gen_vv, gen_helper_vfsqrt_s)
+TRANS(vfsqrt_d, gen_vv, gen_helper_vfsqrt_d)
+TRANS(vfrecip_s, gen_vv, gen_helper_vfrecip_s)
+TRANS(vfrecip_d, gen_vv, gen_helper_vfrecip_d)
+TRANS(vfrsqrt_s, gen_vv, gen_helper_vfrsqrt_s)
+TRANS(vfrsqrt_d, gen_vv, gen_helper_vfrsqrt_d)
+
+TRANS(vfcvtl_s_h, gen_vv, gen_helper_vfcvtl_s_h)
+TRANS(vfcvth_s_h, gen_vv, gen_helper_vfcvth_s_h)
+TRANS(vfcvtl_d_s, gen_vv, gen_helper_vfcvtl_d_s)
+TRANS(vfcvth_d_s, gen_vv, gen_helper_vfcvth_d_s)
+TRANS(vfcvt_h_s, gen_vvv, gen_helper_vfcvt_h_s)
+TRANS(vfcvt_s_d, gen_vvv, gen_helper_vfcvt_s_d)
+
+TRANS(vfrintrne_s, gen_vv, gen_helper_vfrintrne_s)
+TRANS(vfrintrne_d, gen_vv, gen_helper_vfrintrne_d)
+TRANS(vfrintrz_s, gen_vv, gen_helper_vfrintrz_s)
+TRANS(vfrintrz_d, gen_vv, gen_helper_vfrintrz_d)
+TRANS(vfrintrp_s, gen_vv, gen_helper_vfrintrp_s)
+TRANS(vfrintrp_d, gen_vv, gen_helper_vfrintrp_d)
+TRANS(vfrintrm_s, gen_vv, gen_helper_vfrintrm_s)
+TRANS(vfrintrm_d, gen_vv, gen_helper_vfrintrm_d)
+TRANS(vfrint_s, gen_vv, gen_helper_vfrint_s)
+TRANS(vfrint_d, gen_vv, gen_helper_vfrint_d)
+
+TRANS(vftintrne_w_s, gen_vv, gen_helper_vftintrne_w_s)
+TRANS(vftintrne_l_d, gen_vv, gen_helper_vftintrne_l_d)
+TRANS(vftintrz_w_s, gen_vv, gen_helper_vftintrz_w_s)
+TRANS(vftintrz_l_d, gen_vv, gen_helper_vftintrz_l_d)
+TRANS(vftintrp_w_s, gen_vv, gen_helper_vftintrp_w_s)
+TRANS(vftintrp_l_d, gen_vv, gen_helper_vftintrp_l_d)
+TRANS(vftintrm_w_s, gen_vv, gen_helper_vftintrm_w_s)
+TRANS(vftintrm_l_d, gen_vv, gen_helper_vftintrm_l_d)
+TRANS(vftint_w_s, gen_vv, gen_helper_vftint_w_s)
+TRANS(vftint_l_d, gen_vv, gen_helper_vftint_l_d)
+TRANS(vftintrz_wu_s, gen_vv, gen_helper_vftintrz_wu_s)
+TRANS(vftintrz_lu_d, gen_vv, gen_helper_vftintrz_lu_d)
+TRANS(vftint_wu_s, gen_vv, gen_helper_vftint_wu_s)
+TRANS(vftint_lu_d, gen_vv, gen_helper_vftint_lu_d)
+TRANS(vftintrne_w_d, gen_vvv, gen_helper_vftintrne_w_d)
+TRANS(vftintrz_w_d, gen_vvv, gen_helper_vftintrz_w_d)
+TRANS(vftintrp_w_d, gen_vvv, gen_helper_vftintrp_w_d)
+TRANS(vftintrm_w_d, gen_vvv, gen_helper_vftintrm_w_d)
+TRANS(vftint_w_d, gen_vvv, gen_helper_vftint_w_d)
+TRANS(vftintrnel_l_s, gen_vv, gen_helper_vftintrnel_l_s)
+TRANS(vftintrneh_l_s, gen_vv, gen_helper_vftintrneh_l_s)
+TRANS(vftintrzl_l_s, gen_vv, gen_helper_vftintrzl_l_s)
+TRANS(vftintrzh_l_s, gen_vv, gen_helper_vftintrzh_l_s)
+TRANS(vftintrpl_l_s, gen_vv, gen_helper_vftintrpl_l_s)
+TRANS(vftintrph_l_s, gen_vv, gen_helper_vftintrph_l_s)
+TRANS(vftintrml_l_s, gen_vv, gen_helper_vftintrml_l_s)
+TRANS(vftintrmh_l_s, gen_vv, gen_helper_vftintrmh_l_s)
+TRANS(vftintl_l_s, gen_vv, gen_helper_vftintl_l_s)
+TRANS(vftinth_l_s, gen_vv, gen_helper_vftinth_l_s)
+
+TRANS(vffint_s_w, gen_vv, gen_helper_vffint_s_w)
+TRANS(vffint_d_l, gen_vv, gen_helper_vffint_d_l)
+TRANS(vffint_s_wu, gen_vv, gen_helper_vffint_s_wu)
+TRANS(vffint_d_lu, gen_vv, gen_helper_vffint_d_lu)
+TRANS(vffintl_d_w, gen_vv, gen_helper_vffintl_d_w)
+TRANS(vffinth_d_w, gen_vv, gen_helper_vffinth_d_w)
+TRANS(vffint_s_l, gen_vvv, gen_helper_vffint_s_l)
+
+static bool do_cmp(DisasContext *ctx, arg_vvv *a, MemOp mop, TCGCond cond)
+{
+ uint32_t vd_ofs, vj_ofs, vk_ofs;
+
+ CHECK_SXE;
+
+ vd_ofs = vec_full_offset(a->vd);
+ vj_ofs = vec_full_offset(a->vj);
+ vk_ofs = vec_full_offset(a->vk);
+
+ tcg_gen_gvec_cmp(cond, mop, vd_ofs, vj_ofs, vk_ofs, 16, ctx->vl/8);
+ return true;
+}
+
+static void do_cmpi_vec(TCGCond cond,
+ unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ tcg_gen_cmp_vec(cond, vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+static void gen_vseqi_s_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_cmpi_vec(TCG_COND_EQ, vece, t, a, imm);
+}
+
+static void gen_vslei_s_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_cmpi_vec(TCG_COND_LE, vece, t, a, imm);
+}
+
+static void gen_vslti_s_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_cmpi_vec(TCG_COND_LT, vece, t, a, imm);
+}
+
+static void gen_vslei_u_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_cmpi_vec(TCG_COND_LEU, vece, t, a, imm);
+}
+
+static void gen_vslti_u_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_cmpi_vec(TCG_COND_LTU, vece, t, a, imm);
+}
+
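+/*
+ * The DO_CMPI_S/DO_CMPI_U macros expand the immediate compares: the
+ * immediate is duplicated into a vector constant and compared with
+ * cmp_vec, falling back to the per-element-size helpers.
+ */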
+#define DO_CMPI_S(NAME) \
+static bool do_## NAME ##_s(DisasContext *ctx, arg_vv_i *a, MemOp mop) \
+{ \
+ uint32_t vd_ofs, vj_ofs; \
+ \
+ CHECK_SXE; \
+ \
+ static const TCGOpcode vecop_list[] = { \
+ INDEX_op_cmp_vec, 0 \
+ }; \
+ static const GVecGen2i op[4] = { \
+ { \
+ .fniv = gen_## NAME ##_s_vec, \
+ .fnoi = gen_helper_## NAME ##_b, \
+ .opt_opc = vecop_list, \
+ .vece = MO_8 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_s_vec, \
+ .fnoi = gen_helper_## NAME ##_h, \
+ .opt_opc = vecop_list, \
+ .vece = MO_16 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_s_vec, \
+ .fnoi = gen_helper_## NAME ##_w, \
+ .opt_opc = vecop_list, \
+ .vece = MO_32 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_s_vec, \
+ .fnoi = gen_helper_## NAME ##_d, \
+ .opt_opc = vecop_list, \
+ .vece = MO_64 \
+ } \
+ }; \
+ \
+ vd_ofs = vec_full_offset(a->vd); \
+ vj_ofs = vec_full_offset(a->vj); \
+ \
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, 16, ctx->vl/8, a->imm, &op[mop]); \
+ \
+ return true; \
+}
+
+DO_CMPI_S(vseqi)
+DO_CMPI_S(vslei)
+DO_CMPI_S(vslti)
+
+#define DO_CMPI_U(NAME) \
+static bool do_## NAME ##_u(DisasContext *ctx, arg_vv_i *a, MemOp mop) \
+{ \
+ uint32_t vd_ofs, vj_ofs; \
+ \
+ CHECK_SXE; \
+ \
+ static const TCGOpcode vecop_list[] = { \
+ INDEX_op_cmp_vec, 0 \
+ }; \
+ static const GVecGen2i op[4] = { \
+ { \
+ .fniv = gen_## NAME ##_u_vec, \
+ .fnoi = gen_helper_## NAME ##_bu, \
+ .opt_opc = vecop_list, \
+ .vece = MO_8 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_u_vec, \
+ .fnoi = gen_helper_## NAME ##_hu, \
+ .opt_opc = vecop_list, \
+ .vece = MO_16 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_u_vec, \
+ .fnoi = gen_helper_## NAME ##_wu, \
+ .opt_opc = vecop_list, \
+ .vece = MO_32 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_u_vec, \
+ .fnoi = gen_helper_## NAME ##_du, \
+ .opt_opc = vecop_list, \
+ .vece = MO_64 \
+ } \
+ }; \
+ \
+ vd_ofs = vec_full_offset(a->vd); \
+ vj_ofs = vec_full_offset(a->vj); \
+ \
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, 16, ctx->vl/8, a->imm, &op[mop]); \
+ \
+ return true; \
+}
+
+DO_CMPI_U(vslei)
+DO_CMPI_U(vslti)
+
+TRANS(vseq_b, do_cmp, MO_8, TCG_COND_EQ)
+TRANS(vseq_h, do_cmp, MO_16, TCG_COND_EQ)
+TRANS(vseq_w, do_cmp, MO_32, TCG_COND_EQ)
+TRANS(vseq_d, do_cmp, MO_64, TCG_COND_EQ)
+TRANS(vseqi_b, do_vseqi_s, MO_8)
+TRANS(vseqi_h, do_vseqi_s, MO_16)
+TRANS(vseqi_w, do_vseqi_s, MO_32)
+TRANS(vseqi_d, do_vseqi_s, MO_64)
+
+TRANS(vsle_b, do_cmp, MO_8, TCG_COND_LE)
+TRANS(vsle_h, do_cmp, MO_16, TCG_COND_LE)
+TRANS(vsle_w, do_cmp, MO_32, TCG_COND_LE)
+TRANS(vsle_d, do_cmp, MO_64, TCG_COND_LE)
+TRANS(vslei_b, do_vslei_s, MO_8)
+TRANS(vslei_h, do_vslei_s, MO_16)
+TRANS(vslei_w, do_vslei_s, MO_32)
+TRANS(vslei_d, do_vslei_s, MO_64)
+TRANS(vsle_bu, do_cmp, MO_8, TCG_COND_LEU)
+TRANS(vsle_hu, do_cmp, MO_16, TCG_COND_LEU)
+TRANS(vsle_wu, do_cmp, MO_32, TCG_COND_LEU)
+TRANS(vsle_du, do_cmp, MO_64, TCG_COND_LEU)
+TRANS(vslei_bu, do_vslei_u, MO_8)
+TRANS(vslei_hu, do_vslei_u, MO_16)
+TRANS(vslei_wu, do_vslei_u, MO_32)
+TRANS(vslei_du, do_vslei_u, MO_64)
+
+TRANS(vslt_b, do_cmp, MO_8, TCG_COND_LT)
+TRANS(vslt_h, do_cmp, MO_16, TCG_COND_LT)
+TRANS(vslt_w, do_cmp, MO_32, TCG_COND_LT)
+TRANS(vslt_d, do_cmp, MO_64, TCG_COND_LT)
+TRANS(vslti_b, do_vslti_s, MO_8)
+TRANS(vslti_h, do_vslti_s, MO_16)
+TRANS(vslti_w, do_vslti_s, MO_32)
+TRANS(vslti_d, do_vslti_s, MO_64)
+TRANS(vslt_bu, do_cmp, MO_8, TCG_COND_LTU)
+TRANS(vslt_hu, do_cmp, MO_16, TCG_COND_LTU)
+TRANS(vslt_wu, do_cmp, MO_32, TCG_COND_LTU)
+TRANS(vslt_du, do_cmp, MO_64, TCG_COND_LTU)
+TRANS(vslti_bu, do_vslti_u, MO_8)
+TRANS(vslti_hu, do_vslti_u, MO_16)
+TRANS(vslti_wu, do_vslti_u, MO_32)
+TRANS(vslti_du, do_vslti_u, MO_64)
+
+static bool trans_vfcmp_cond_s(DisasContext *ctx, arg_vvv_fcond *a)
+{
+ uint32_t flags;
+ void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 vk = tcg_constant_i32(a->vk);
+
+ CHECK_SXE;
+
+ fn = (a->fcond & 1 ? gen_helper_vfcmp_s_s : gen_helper_vfcmp_c_s);
+ flags = get_fcmp_flags(a->fcond >> 1);
+ fn(cpu_env, vd, vj, vk, tcg_constant_i32(flags));
+
+ return true;
+}
+
+static bool trans_vfcmp_cond_d(DisasContext *ctx, arg_vvv_fcond *a)
+{
+ uint32_t flags;
+ void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 vk = tcg_constant_i32(a->vk);
+
+    CHECK_SXE;
+
+ fn = (a->fcond & 1 ? gen_helper_vfcmp_s_d : gen_helper_vfcmp_c_d);
+ flags = get_fcmp_flags(a->fcond >> 1);
+ fn(cpu_env, vd, vj, vk, tcg_constant_i32(flags));
+
+ return true;
+}
+
+static bool trans_vbitsel_v(DisasContext *ctx, arg_vvvv *a)
+{
+ CHECK_SXE;
+
+ tcg_gen_gvec_bitsel(MO_64, vec_full_offset(a->vd), vec_full_offset(a->va),
+ vec_full_offset(a->vk), vec_full_offset(a->vj),
+ 16, ctx->vl/8);
+ return true;
+}
+
+static void gen_vbitseli(unsigned vece, TCGv_vec a, TCGv_vec b, int64_t imm)
+{
+ tcg_gen_bitsel_vec(vece, a, a, tcg_constant_vec_matching(a, vece, imm), b);
+}
+
+static bool trans_vbitseli_b(DisasContext *ctx, arg_vv_i *a)
+{
+ static const GVecGen2i op = {
+ .fniv = gen_vbitseli,
+ .fnoi = gen_helper_vbitseli_b,
+ .vece = MO_8,
+ .load_dest = true
+ };
+
+ CHECK_SXE;
+
+ tcg_gen_gvec_2i(vec_full_offset(a->vd), vec_full_offset(a->vj),
+ 16, ctx->vl/8, a->imm, &op);
+ return true;
+}
+
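+/* VSETEQZ.V / VSETNEZ.V: set cd when the whole 128-bit vj is zero / non-zero. */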
+#define VSET(NAME, COND) \
+static bool trans_## NAME (DisasContext *ctx, arg_cv *a) \
+{ \
+ TCGv_i64 t1, al, ah; \
+ \
+ al = tcg_temp_new_i64(); \
+ ah = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ \
+ get_vreg64(ah, a->vj, 1); \
+ get_vreg64(al, a->vj, 0); \
+ \
+ CHECK_SXE; \
+ tcg_gen_or_i64(t1, al, ah); \
+ tcg_gen_setcondi_i64(COND, t1, t1, 0); \
+ tcg_gen_st8_tl(t1, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); \
+ \
+ return true; \
+}
+
+VSET(vseteqz_v, TCG_COND_EQ)
+VSET(vsetnez_v, TCG_COND_NE)
+
+TRANS(vsetanyeqz_b, gen_cv, gen_helper_vsetanyeqz_b)
+TRANS(vsetanyeqz_h, gen_cv, gen_helper_vsetanyeqz_h)
+TRANS(vsetanyeqz_w, gen_cv, gen_helper_vsetanyeqz_w)
+TRANS(vsetanyeqz_d, gen_cv, gen_helper_vsetanyeqz_d)
+TRANS(vsetallnez_b, gen_cv, gen_helper_vsetallnez_b)
+TRANS(vsetallnez_h, gen_cv, gen_helper_vsetallnez_h)
+TRANS(vsetallnez_w, gen_cv, gen_helper_vsetallnez_w)
+TRANS(vsetallnez_d, gen_cv, gen_helper_vsetallnez_d)
+
+static bool trans_vinsgr2vr_b(DisasContext *ctx, arg_vr_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_st8_i64(cpu_gpr[a->rj], cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vd].vreg.B(a->imm)));
+ return true;
+}
+
+static bool trans_vinsgr2vr_h(DisasContext *ctx, arg_vr_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_st16_i64(cpu_gpr[a->rj], cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vd].vreg.H(a->imm)));
+ return true;
+}
+
+static bool trans_vinsgr2vr_w(DisasContext *ctx, arg_vr_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_st32_i64(cpu_gpr[a->rj], cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vd].vreg.W(a->imm)));
+ return true;
+}
+
+static bool trans_vinsgr2vr_d(DisasContext *ctx, arg_vr_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_st_i64(cpu_gpr[a->rj], cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vd].vreg.D(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_b(DisasContext *ctx, arg_rv_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_ld8s_i64(cpu_gpr[a->rd], cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.B(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_h(DisasContext *ctx, arg_rv_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_ld16s_i64(cpu_gpr[a->rd], cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.H(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_w(DisasContext *ctx, arg_rv_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_ld32s_i64(cpu_gpr[a->rd], cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.W(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_d(DisasContext *ctx, arg_rv_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_ld_i64(cpu_gpr[a->rd], cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.D(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_bu(DisasContext *ctx, arg_rv_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_ld8u_i64(cpu_gpr[a->rd], cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.B(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_hu(DisasContext *ctx, arg_rv_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_ld16u_i64(cpu_gpr[a->rd], cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.H(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_wu(DisasContext *ctx, arg_rv_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_ld32u_i64(cpu_gpr[a->rd], cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.W(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_du(DisasContext *ctx, arg_rv_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_ld_i64(cpu_gpr[a->rd], cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.D(a->imm)));
+ return true;
+}
+
+static bool gvec_dup(DisasContext *ctx, arg_vr *a, MemOp mop)
+{
+ CHECK_SXE;
+
+ tcg_gen_gvec_dup_i64(mop, vec_full_offset(a->vd),
+ 16, ctx->vl/8, cpu_gpr[a->rj]);
+ return true;
+}
+
+TRANS(vreplgr2vr_b, gvec_dup, MO_8)
+TRANS(vreplgr2vr_h, gvec_dup, MO_16)
+TRANS(vreplgr2vr_w, gvec_dup, MO_32)
+TRANS(vreplgr2vr_d, gvec_dup, MO_64)
+
+static bool trans_vreplvei_b(DisasContext *ctx, arg_vv_i *a)
+{
+ CHECK_SXE;
+    tcg_gen_gvec_dup_mem(MO_8, vec_full_offset(a->vd),
+ offsetof(CPULoongArchState,
+ fpr[a->vj].vreg.B((a->imm))),
+ 16, ctx->vl/8);
+ return true;
+}
+
+static bool trans_vreplvei_h(DisasContext *ctx, arg_vv_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_gvec_dup_mem(MO_16, vec_full_offset(a->vd),
+ offsetof(CPULoongArchState,
+ fpr[a->vj].vreg.H((a->imm))),
+ 16, ctx->vl/8);
+ return true;
+}
+
+static bool trans_vreplvei_w(DisasContext *ctx, arg_vv_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_gvec_dup_mem(MO_32, vec_full_offset(a->vd),
+ offsetof(CPULoongArchState,
+ fpr[a->vj].vreg.W((a->imm))),
+ 16, ctx->vl/8);
+ return true;
+}
+
+static bool trans_vreplvei_d(DisasContext *ctx, arg_vv_i *a)
+{
+ CHECK_SXE;
+ tcg_gen_gvec_dup_mem(MO_64, vec_full_offset(a->vd),
+ offsetof(CPULoongArchState,
+ fpr[a->vj].vreg.D((a->imm))),
+ 16, ctx->vl/8);
+ return true;
+}
+
+static bool gen_vreplve(DisasContext *ctx, arg_vvr *a, int vece, int bit,
+ void (*func)(TCGv_i64, TCGv_ptr, tcg_target_long))
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_ptr t1 = tcg_temp_new_ptr();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ CHECK_SXE;
+
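+    /*
+     * Compute the in-register byte offset of element (rk % element-count),
+     * adjusted for host endianness, then load that element and broadcast it
+     * to every lane of vd.
+     */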
+    tcg_gen_andi_i64(t0, gpr_src(ctx, a->rk, EXT_NONE), (LSX_LEN/bit) - 1);
+ tcg_gen_shli_i64(t0, t0, vece);
+ if (HOST_BIG_ENDIAN) {
+        tcg_gen_xori_i64(t0, t0, vece << ((LSX_LEN/bit) - 1));
+ }
+
+ tcg_gen_trunc_i64_ptr(t1, t0);
+ tcg_gen_add_ptr(t1, t1, cpu_env);
+ func(t2, t1, vec_full_offset(a->vj));
+ tcg_gen_gvec_dup_i64(vece, vec_full_offset(a->vd), 16, ctx->vl/8, t2);
+
+ return true;
+}
+
+TRANS(vreplve_b, gen_vreplve, MO_8, 8, tcg_gen_ld8u_i64)
+TRANS(vreplve_h, gen_vreplve, MO_16, 16, tcg_gen_ld16u_i64)
+TRANS(vreplve_w, gen_vreplve, MO_32, 32, tcg_gen_ld32u_i64)
+TRANS(vreplve_d, gen_vreplve, MO_64, 64, tcg_gen_ld_i64)
+
+static bool trans_vbsll_v(DisasContext *ctx, arg_vv_i *a)
+{
+ int ofs;
+ TCGv_i64 desthigh, destlow, high, low;
+
+ CHECK_SXE;
+
+ desthigh = tcg_temp_new_i64();
+ destlow = tcg_temp_new_i64();
+ high = tcg_temp_new_i64();
+ low = tcg_temp_new_i64();
+
+ get_vreg64(low, a->vj, 0);
+
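+    /* Shift the 128-bit value left by (imm % 16) bytes across both halves. */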
+ ofs = ((a->imm) & 0xf) * 8;
+ if (ofs < 64) {
+ get_vreg64(high, a->vj, 1);
+ tcg_gen_extract2_i64(desthigh, low, high, 64 - ofs);
+ tcg_gen_shli_i64(destlow, low, ofs);
+ } else {
+ tcg_gen_shli_i64(desthigh, low, ofs - 64);
+ destlow = tcg_constant_i64(0);
+ }
+
+ set_vreg64(desthigh, a->vd, 1);
+ set_vreg64(destlow, a->vd, 0);
+
+ return true;
+}
+
+static bool trans_vbsrl_v(DisasContext *ctx, arg_vv_i *a)
+{
+ TCGv_i64 desthigh, destlow, high, low;
+ int ofs;
+
+ CHECK_SXE;
+
+ desthigh = tcg_temp_new_i64();
+ destlow = tcg_temp_new_i64();
+ high = tcg_temp_new_i64();
+ low = tcg_temp_new_i64();
+
+ get_vreg64(high, a->vj, 1);
+
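+    /* Shift the 128-bit value right by (imm % 16) bytes across both halves. */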
+ ofs = ((a->imm) & 0xf) * 8;
+ if (ofs < 64) {
+ get_vreg64(low, a->vj, 0);
+ tcg_gen_extract2_i64(destlow, low, high, ofs);
+ tcg_gen_shri_i64(desthigh, high, ofs);
+ } else {
+ tcg_gen_shri_i64(destlow, high, ofs - 64);
+ desthigh = tcg_constant_i64(0);
+ }
+
+ set_vreg64(desthigh, a->vd, 1);
+ set_vreg64(destlow, a->vd, 0);
+
+ return true;
+}
+
+TRANS(vpackev_b, gen_vvv, gen_helper_vpackev_b)
+TRANS(vpackev_h, gen_vvv, gen_helper_vpackev_h)
+TRANS(vpackev_w, gen_vvv, gen_helper_vpackev_w)
+TRANS(vpackev_d, gen_vvv, gen_helper_vpackev_d)
+TRANS(vpackod_b, gen_vvv, gen_helper_vpackod_b)
+TRANS(vpackod_h, gen_vvv, gen_helper_vpackod_h)
+TRANS(vpackod_w, gen_vvv, gen_helper_vpackod_w)
+TRANS(vpackod_d, gen_vvv, gen_helper_vpackod_d)
+
+TRANS(vpickev_b, gen_vvv, gen_helper_vpickev_b)
+TRANS(vpickev_h, gen_vvv, gen_helper_vpickev_h)
+TRANS(vpickev_w, gen_vvv, gen_helper_vpickev_w)
+TRANS(vpickev_d, gen_vvv, gen_helper_vpickev_d)
+TRANS(vpickod_b, gen_vvv, gen_helper_vpickod_b)
+TRANS(vpickod_h, gen_vvv, gen_helper_vpickod_h)
+TRANS(vpickod_w, gen_vvv, gen_helper_vpickod_w)
+TRANS(vpickod_d, gen_vvv, gen_helper_vpickod_d)
+
+TRANS(vilvl_b, gen_vvv, gen_helper_vilvl_b)
+TRANS(vilvl_h, gen_vvv, gen_helper_vilvl_h)
+TRANS(vilvl_w, gen_vvv, gen_helper_vilvl_w)
+TRANS(vilvl_d, gen_vvv, gen_helper_vilvl_d)
+TRANS(vilvh_b, gen_vvv, gen_helper_vilvh_b)
+TRANS(vilvh_h, gen_vvv, gen_helper_vilvh_h)
+TRANS(vilvh_w, gen_vvv, gen_helper_vilvh_w)
+TRANS(vilvh_d, gen_vvv, gen_helper_vilvh_d)
+
+TRANS(vshuf_b, gen_vvvv, gen_helper_vshuf_b)
+TRANS(vshuf_h, gen_vvv, gen_helper_vshuf_h)
+TRANS(vshuf_w, gen_vvv, gen_helper_vshuf_w)
+TRANS(vshuf_d, gen_vvv, gen_helper_vshuf_d)
+TRANS(vshuf4i_b, gen_vv_i, gen_helper_vshuf4i_b)
+TRANS(vshuf4i_h, gen_vv_i, gen_helper_vshuf4i_h)
+TRANS(vshuf4i_w, gen_vv_i, gen_helper_vshuf4i_w)
+TRANS(vshuf4i_d, gen_vv_i, gen_helper_vshuf4i_d)
+
+TRANS(vpermi_w, gen_vv_i, gen_helper_vpermi_w)
+
+TRANS(vextrins_b, gen_vv_i, gen_helper_vextrins_b)
+TRANS(vextrins_h, gen_vv_i, gen_helper_vextrins_h)
+TRANS(vextrins_w, gen_vv_i, gen_helper_vextrins_w)
+TRANS(vextrins_d, gen_vv_i, gen_helper_vextrins_d)
+
+static bool trans_vld(DisasContext *ctx, arg_vr_i *a)
+{
+ TCGv addr, temp;
+ TCGv_i64 rl, rh;
+ TCGv_i128 val;
+
+ CHECK_SXE;
+
+ addr = gpr_src(ctx, a->rj, EXT_NONE);
+ val = tcg_temp_new_i128();
+ rl = tcg_temp_new_i64();
+ rh = tcg_temp_new_i64();
+
+ if (a->imm) {
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, addr, a->imm);
+ addr = temp;
+ }
+
+ tcg_gen_qemu_ld_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE);
+ tcg_gen_extr_i128_i64(rl, rh, val);
+ set_vreg64(rh, a->vd, 1);
+ set_vreg64(rl, a->vd, 0);
+
+ return true;
+}
+
+static bool trans_vst(DisasContext *ctx, arg_vr_i *a)
+{
+ TCGv addr, temp;
+ TCGv_i128 val;
+ TCGv_i64 ah, al;
+
+ CHECK_SXE;
+
+ addr = gpr_src(ctx, a->rj, EXT_NONE);
+ val = tcg_temp_new_i128();
+ ah = tcg_temp_new_i64();
+ al = tcg_temp_new_i64();
+
+ if (a->imm) {
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, addr, a->imm);
+ addr = temp;
+ }
+
+ get_vreg64(ah, a->vd, 1);
+ get_vreg64(al, a->vd, 0);
+ tcg_gen_concat_i64_i128(val, al, ah);
+ tcg_gen_qemu_st_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE);
+
+ return true;
+}
+
+static bool trans_vldx(DisasContext *ctx, arg_vrr *a)
+{
+ TCGv addr, src1, src2;
+ TCGv_i64 rl, rh;
+ TCGv_i128 val;
+
+ CHECK_SXE;
+
+ addr = tcg_temp_new();
+ src1 = gpr_src(ctx, a->rj, EXT_NONE);
+ src2 = gpr_src(ctx, a->rk, EXT_NONE);
+ val = tcg_temp_new_i128();
+ rl = tcg_temp_new_i64();
+ rh = tcg_temp_new_i64();
+
+ tcg_gen_add_tl(addr, src1, src2);
+ tcg_gen_qemu_ld_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE);
+ tcg_gen_extr_i128_i64(rl, rh, val);
+ set_vreg64(rh, a->vd, 1);
+ set_vreg64(rl, a->vd, 0);
+
+ return true;
+}
+
+static bool trans_vstx(DisasContext *ctx, arg_vrr *a)
+{
+ TCGv addr, src1, src2;
+ TCGv_i64 ah, al;
+ TCGv_i128 val;
+
+ CHECK_SXE;
+
+ addr = tcg_temp_new();
+ src1 = gpr_src(ctx, a->rj, EXT_NONE);
+ src2 = gpr_src(ctx, a->rk, EXT_NONE);
+ val = tcg_temp_new_i128();
+ ah = tcg_temp_new_i64();
+ al = tcg_temp_new_i64();
+
+ tcg_gen_add_tl(addr, src1, src2);
+ get_vreg64(ah, a->vd, 1);
+ get_vreg64(al, a->vd, 0);
+ tcg_gen_concat_i64_i128(val, al, ah);
+ tcg_gen_qemu_st_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE);
+
+ return true;
+}
+
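+/* VLDREPL: load a single element from memory and replicate it to every lane. */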
+#define VLDREPL(NAME, MO) \
+static bool trans_## NAME (DisasContext *ctx, arg_vr_i *a) \
+{ \
+ TCGv addr, temp; \
+ TCGv_i64 val; \
+ \
+ CHECK_SXE; \
+ \
+ addr = gpr_src(ctx, a->rj, EXT_NONE); \
+ val = tcg_temp_new_i64(); \
+ \
+ if (a->imm) { \
+ temp = tcg_temp_new(); \
+ tcg_gen_addi_tl(temp, addr, a->imm); \
+ addr = temp; \
+ } \
+ \
+ tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, MO); \
+ tcg_gen_gvec_dup_i64(MO, vec_full_offset(a->vd), 16, ctx->vl/8, val); \
+ \
+ return true; \
+}
+
+VLDREPL(vldrepl_b, MO_8)
+VLDREPL(vldrepl_h, MO_16)
+VLDREPL(vldrepl_w, MO_32)
+VLDREPL(vldrepl_d, MO_64)
+
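+/* VSTELM: store element imm2 of vd to memory at address rj + imm. */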
+#define VSTELM(NAME, MO, E) \
+static bool trans_## NAME (DisasContext *ctx, arg_vr_ii *a) \
+{ \
+ TCGv addr, temp; \
+ TCGv_i64 val; \
+ \
+ CHECK_SXE; \
+ \
+ addr = gpr_src(ctx, a->rj, EXT_NONE); \
+ val = tcg_temp_new_i64(); \
+ \
+ if (a->imm) { \
+ temp = tcg_temp_new(); \
+ tcg_gen_addi_tl(temp, addr, a->imm); \
+ addr = temp; \
+ } \
+ \
+ tcg_gen_ld_i64(val, cpu_env, \
+ offsetof(CPULoongArchState, fpr[a->vd].vreg.E(a->imm2))); \
+ tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, MO); \
+ \
+ return true; \
+}
+
+VSTELM(vstelm_b, MO_8, B)
+VSTELM(vstelm_h, MO_16, H)
+VSTELM(vstelm_w, MO_32, W)
+VSTELM(vstelm_d, MO_64, D)
diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode
index de7b8f0..c9c3bc2 100644
--- a/target/loongarch/insns.decode
+++ b/target/loongarch/insns.decode
@@ -485,3 +485,814 @@ ldpte 0000 01100100 01 ........ ..... 00000 @j_i
ertn 0000 01100100 10000 01110 00000 00000 @empty
idle 0000 01100100 10001 ............... @i15
dbcl 0000 00000010 10101 ............... @i15
+
+#
+# LSX Fields
+#
+
+%i9s3 10:s9 !function=shl_3
+%i10s2 10:s10 !function=shl_2
+%i11s1 10:s11 !function=shl_1
+%i8s3 10:s8 !function=shl_3
+%i8s2 10:s8 !function=shl_2
+%i8s1 10:s8 !function=shl_1
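+# vldrepl/vstelm offsets are encoded scaled by the access size;
+# shl_1/shl_2/shl_3 recover the byte offset.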
+
+#
+# LSX Argument sets
+#
+
+&vv vd vj
+&cv cd vj
+&vvv vd vj vk
+&vv_i vd vj imm
+&vvvv vd vj vk va
+&vvv_fcond vd vj vk fcond
+&vr_i vd rj imm
+&rv_i rd vj imm
+&vr vd rj
+&vvr vd vj rk
+&vrr vd rj rk
+&vr_ii vd rj imm imm2
+&v_i vd imm
+
+#
+# LSX Formats
+#
+@vv .... ........ ..... ..... vj:5 vd:5 &vv
+@cv .... ........ ..... ..... vj:5 .. cd:3 &cv
+@vvv .... ........ ..... vk:5 vj:5 vd:5 &vvv
+@vv_ui1 .... ........ ..... .... imm:1 vj:5 vd:5 &vv_i
+@vv_ui2 .... ........ ..... ... imm:2 vj:5 vd:5 &vv_i
+@vv_ui3 .... ........ ..... .. imm:3 vj:5 vd:5 &vv_i
+@vv_ui4 .... ........ ..... . imm:4 vj:5 vd:5 &vv_i
+@vv_ui5 .... ........ ..... imm:5 vj:5 vd:5 &vv_i
+@vv_ui6 .... ........ .... imm:6 vj:5 vd:5 &vv_i
+@vv_ui7 .... ........ ... imm:7 vj:5 vd:5 &vv_i
+@vv_ui8 .... ........ .. imm:8 vj:5 vd:5 &vv_i
+@vv_i5 .... ........ ..... imm:s5 vj:5 vd:5 &vv_i
+@vvvv .... ........ va:5 vk:5 vj:5 vd:5 &vvvv
+@vvv_fcond .... ........ fcond:5 vk:5 vj:5 vd:5 &vvv_fcond
+@vr_ui4 .... ........ ..... . imm:4 rj:5 vd:5 &vr_i
+@vr_ui3 .... ........ ..... .. imm:3 rj:5 vd:5 &vr_i
+@vr_ui2 .... ........ ..... ... imm:2 rj:5 vd:5 &vr_i
+@vr_ui1 .... ........ ..... .... imm:1 rj:5 vd:5 &vr_i
+@rv_ui4 .... ........ ..... . imm:4 vj:5 rd:5 &rv_i
+@rv_ui3 .... ........ ..... .. imm:3 vj:5 rd:5 &rv_i
+@rv_ui2 .... ........ ..... ... imm:2 vj:5 rd:5 &rv_i
+@rv_ui1 .... ........ ..... .... imm:1 vj:5 rd:5 &rv_i
+@vr .... ........ ..... ..... rj:5 vd:5 &vr
+@vvr .... ........ ..... rk:5 vj:5 vd:5 &vvr
+@vr_i9 .... ........ . ......... rj:5 vd:5 &vr_i imm=%i9s3
+@vr_i10 .... ........ .......... rj:5 vd:5 &vr_i imm=%i10s2
+@vr_i11 .... ....... ........... rj:5 vd:5 &vr_i imm=%i11s1
+@vr_i12 .... ...... imm:s12 rj:5 vd:5 &vr_i
+@vr_i8i1 .... ........ . imm2:1 ........ rj:5 vd:5 &vr_ii imm=%i8s3
+@vr_i8i2 .... ........ imm2:2 ........ rj:5 vd:5 &vr_ii imm=%i8s2
+@vr_i8i3 .... ....... imm2:3 ........ rj:5 vd:5 &vr_ii imm=%i8s1
+@vr_i8i4 .... ...... imm2:4 imm:s8 rj:5 vd:5 &vr_ii
+@vrr .... ........ ..... rk:5 rj:5 vd:5 &vrr
+@v_i13 .... ........ .. imm:13 vd:5 &v_i
+
+vadd_b 0111 00000000 10100 ..... ..... ..... @vvv
+vadd_h 0111 00000000 10101 ..... ..... ..... @vvv
+vadd_w 0111 00000000 10110 ..... ..... ..... @vvv
+vadd_d 0111 00000000 10111 ..... ..... ..... @vvv
+vadd_q 0111 00010010 11010 ..... ..... ..... @vvv
+vsub_b 0111 00000000 11000 ..... ..... ..... @vvv
+vsub_h 0111 00000000 11001 ..... ..... ..... @vvv
+vsub_w 0111 00000000 11010 ..... ..... ..... @vvv
+vsub_d 0111 00000000 11011 ..... ..... ..... @vvv
+vsub_q 0111 00010010 11011 ..... ..... ..... @vvv
+
+vaddi_bu 0111 00101000 10100 ..... ..... ..... @vv_ui5
+vaddi_hu 0111 00101000 10101 ..... ..... ..... @vv_ui5
+vaddi_wu 0111 00101000 10110 ..... ..... ..... @vv_ui5
+vaddi_du 0111 00101000 10111 ..... ..... ..... @vv_ui5
+vsubi_bu 0111 00101000 11000 ..... ..... ..... @vv_ui5
+vsubi_hu 0111 00101000 11001 ..... ..... ..... @vv_ui5
+vsubi_wu 0111 00101000 11010 ..... ..... ..... @vv_ui5
+vsubi_du 0111 00101000 11011 ..... ..... ..... @vv_ui5
+
+vneg_b 0111 00101001 11000 01100 ..... ..... @vv
+vneg_h 0111 00101001 11000 01101 ..... ..... @vv
+vneg_w 0111 00101001 11000 01110 ..... ..... @vv
+vneg_d 0111 00101001 11000 01111 ..... ..... @vv
+
+vsadd_b 0111 00000100 01100 ..... ..... ..... @vvv
+vsadd_h 0111 00000100 01101 ..... ..... ..... @vvv
+vsadd_w 0111 00000100 01110 ..... ..... ..... @vvv
+vsadd_d 0111 00000100 01111 ..... ..... ..... @vvv
+vsadd_bu 0111 00000100 10100 ..... ..... ..... @vvv
+vsadd_hu 0111 00000100 10101 ..... ..... ..... @vvv
+vsadd_wu 0111 00000100 10110 ..... ..... ..... @vvv
+vsadd_du 0111 00000100 10111 ..... ..... ..... @vvv
+vssub_b 0111 00000100 10000 ..... ..... ..... @vvv
+vssub_h 0111 00000100 10001 ..... ..... ..... @vvv
+vssub_w 0111 00000100 10010 ..... ..... ..... @vvv
+vssub_d 0111 00000100 10011 ..... ..... ..... @vvv
+vssub_bu 0111 00000100 11000 ..... ..... ..... @vvv
+vssub_hu 0111 00000100 11001 ..... ..... ..... @vvv
+vssub_wu 0111 00000100 11010 ..... ..... ..... @vvv
+vssub_du 0111 00000100 11011 ..... ..... ..... @vvv
+
+vhaddw_h_b 0111 00000101 01000 ..... ..... ..... @vvv
+vhaddw_w_h 0111 00000101 01001 ..... ..... ..... @vvv
+vhaddw_d_w 0111 00000101 01010 ..... ..... ..... @vvv
+vhaddw_q_d 0111 00000101 01011 ..... ..... ..... @vvv
+vhaddw_hu_bu 0111 00000101 10000 ..... ..... ..... @vvv
+vhaddw_wu_hu 0111 00000101 10001 ..... ..... ..... @vvv
+vhaddw_du_wu 0111 00000101 10010 ..... ..... ..... @vvv
+vhaddw_qu_du 0111 00000101 10011 ..... ..... ..... @vvv
+vhsubw_h_b 0111 00000101 01100 ..... ..... ..... @vvv
+vhsubw_w_h 0111 00000101 01101 ..... ..... ..... @vvv
+vhsubw_d_w 0111 00000101 01110 ..... ..... ..... @vvv
+vhsubw_q_d 0111 00000101 01111 ..... ..... ..... @vvv
+vhsubw_hu_bu 0111 00000101 10100 ..... ..... ..... @vvv
+vhsubw_wu_hu 0111 00000101 10101 ..... ..... ..... @vvv
+vhsubw_du_wu 0111 00000101 10110 ..... ..... ..... @vvv
+vhsubw_qu_du 0111 00000101 10111 ..... ..... ..... @vvv
+
+vaddwev_h_b 0111 00000001 11100 ..... ..... ..... @vvv
+vaddwev_w_h 0111 00000001 11101 ..... ..... ..... @vvv
+vaddwev_d_w 0111 00000001 11110 ..... ..... ..... @vvv
+vaddwev_q_d 0111 00000001 11111 ..... ..... ..... @vvv
+vaddwod_h_b 0111 00000010 00100 ..... ..... ..... @vvv
+vaddwod_w_h 0111 00000010 00101 ..... ..... ..... @vvv
+vaddwod_d_w 0111 00000010 00110 ..... ..... ..... @vvv
+vaddwod_q_d 0111 00000010 00111 ..... ..... ..... @vvv
+vsubwev_h_b 0111 00000010 00000 ..... ..... ..... @vvv
+vsubwev_w_h 0111 00000010 00001 ..... ..... ..... @vvv
+vsubwev_d_w 0111 00000010 00010 ..... ..... ..... @vvv
+vsubwev_q_d 0111 00000010 00011 ..... ..... ..... @vvv
+vsubwod_h_b 0111 00000010 01000 ..... ..... ..... @vvv
+vsubwod_w_h 0111 00000010 01001 ..... ..... ..... @vvv
+vsubwod_d_w 0111 00000010 01010 ..... ..... ..... @vvv
+vsubwod_q_d 0111 00000010 01011 ..... ..... ..... @vvv
+
+vaddwev_h_bu 0111 00000010 11100 ..... ..... ..... @vvv
+vaddwev_w_hu 0111 00000010 11101 ..... ..... ..... @vvv
+vaddwev_d_wu 0111 00000010 11110 ..... ..... ..... @vvv
+vaddwev_q_du 0111 00000010 11111 ..... ..... ..... @vvv
+vaddwod_h_bu 0111 00000011 00100 ..... ..... ..... @vvv
+vaddwod_w_hu 0111 00000011 00101 ..... ..... ..... @vvv
+vaddwod_d_wu 0111 00000011 00110 ..... ..... ..... @vvv
+vaddwod_q_du 0111 00000011 00111 ..... ..... ..... @vvv
+vsubwev_h_bu 0111 00000011 00000 ..... ..... ..... @vvv
+vsubwev_w_hu 0111 00000011 00001 ..... ..... ..... @vvv
+vsubwev_d_wu 0111 00000011 00010 ..... ..... ..... @vvv
+vsubwev_q_du 0111 00000011 00011 ..... ..... ..... @vvv
+vsubwod_h_bu 0111 00000011 01000 ..... ..... ..... @vvv
+vsubwod_w_hu 0111 00000011 01001 ..... ..... ..... @vvv
+vsubwod_d_wu 0111 00000011 01010 ..... ..... ..... @vvv
+vsubwod_q_du 0111 00000011 01011 ..... ..... ..... @vvv
+
+vaddwev_h_bu_b 0111 00000011 11100 ..... ..... ..... @vvv
+vaddwev_w_hu_h 0111 00000011 11101 ..... ..... ..... @vvv
+vaddwev_d_wu_w 0111 00000011 11110 ..... ..... ..... @vvv
+vaddwev_q_du_d 0111 00000011 11111 ..... ..... ..... @vvv
+vaddwod_h_bu_b 0111 00000100 00000 ..... ..... ..... @vvv
+vaddwod_w_hu_h 0111 00000100 00001 ..... ..... ..... @vvv
+vaddwod_d_wu_w 0111 00000100 00010 ..... ..... ..... @vvv
+vaddwod_q_du_d 0111 00000100 00011 ..... ..... ..... @vvv
+
+vavg_b 0111 00000110 01000 ..... ..... ..... @vvv
+vavg_h 0111 00000110 01001 ..... ..... ..... @vvv
+vavg_w 0111 00000110 01010 ..... ..... ..... @vvv
+vavg_d 0111 00000110 01011 ..... ..... ..... @vvv
+vavg_bu 0111 00000110 01100 ..... ..... ..... @vvv
+vavg_hu 0111 00000110 01101 ..... ..... ..... @vvv
+vavg_wu 0111 00000110 01110 ..... ..... ..... @vvv
+vavg_du 0111 00000110 01111 ..... ..... ..... @vvv
+vavgr_b 0111 00000110 10000 ..... ..... ..... @vvv
+vavgr_h 0111 00000110 10001 ..... ..... ..... @vvv
+vavgr_w 0111 00000110 10010 ..... ..... ..... @vvv
+vavgr_d 0111 00000110 10011 ..... ..... ..... @vvv
+vavgr_bu 0111 00000110 10100 ..... ..... ..... @vvv
+vavgr_hu 0111 00000110 10101 ..... ..... ..... @vvv
+vavgr_wu 0111 00000110 10110 ..... ..... ..... @vvv
+vavgr_du 0111 00000110 10111 ..... ..... ..... @vvv
+
+vabsd_b 0111 00000110 00000 ..... ..... ..... @vvv
+vabsd_h 0111 00000110 00001 ..... ..... ..... @vvv
+vabsd_w 0111 00000110 00010 ..... ..... ..... @vvv
+vabsd_d 0111 00000110 00011 ..... ..... ..... @vvv
+vabsd_bu 0111 00000110 00100 ..... ..... ..... @vvv
+vabsd_hu 0111 00000110 00101 ..... ..... ..... @vvv
+vabsd_wu 0111 00000110 00110 ..... ..... ..... @vvv
+vabsd_du 0111 00000110 00111 ..... ..... ..... @vvv
+
+vadda_b 0111 00000101 11000 ..... ..... ..... @vvv
+vadda_h 0111 00000101 11001 ..... ..... ..... @vvv
+vadda_w 0111 00000101 11010 ..... ..... ..... @vvv
+vadda_d 0111 00000101 11011 ..... ..... ..... @vvv
+
+vmax_b 0111 00000111 00000 ..... ..... ..... @vvv
+vmax_h 0111 00000111 00001 ..... ..... ..... @vvv
+vmax_w 0111 00000111 00010 ..... ..... ..... @vvv
+vmax_d 0111 00000111 00011 ..... ..... ..... @vvv
+vmaxi_b 0111 00101001 00000 ..... ..... ..... @vv_i5
+vmaxi_h 0111 00101001 00001 ..... ..... ..... @vv_i5
+vmaxi_w 0111 00101001 00010 ..... ..... ..... @vv_i5
+vmaxi_d 0111 00101001 00011 ..... ..... ..... @vv_i5
+vmax_bu 0111 00000111 01000 ..... ..... ..... @vvv
+vmax_hu 0111 00000111 01001 ..... ..... ..... @vvv
+vmax_wu 0111 00000111 01010 ..... ..... ..... @vvv
+vmax_du 0111 00000111 01011 ..... ..... ..... @vvv
+vmaxi_bu 0111 00101001 01000 ..... ..... ..... @vv_ui5
+vmaxi_hu 0111 00101001 01001 ..... ..... ..... @vv_ui5
+vmaxi_wu 0111 00101001 01010 ..... ..... ..... @vv_ui5
+vmaxi_du 0111 00101001 01011 ..... ..... ..... @vv_ui5
+
+vmin_b 0111 00000111 00100 ..... ..... ..... @vvv
+vmin_h 0111 00000111 00101 ..... ..... ..... @vvv
+vmin_w 0111 00000111 00110 ..... ..... ..... @vvv
+vmin_d 0111 00000111 00111 ..... ..... ..... @vvv
+vmini_b 0111 00101001 00100 ..... ..... ..... @vv_i5
+vmini_h 0111 00101001 00101 ..... ..... ..... @vv_i5
+vmini_w 0111 00101001 00110 ..... ..... ..... @vv_i5
+vmini_d 0111 00101001 00111 ..... ..... ..... @vv_i5
+vmin_bu 0111 00000111 01100 ..... ..... ..... @vvv
+vmin_hu 0111 00000111 01101 ..... ..... ..... @vvv
+vmin_wu 0111 00000111 01110 ..... ..... ..... @vvv
+vmin_du 0111 00000111 01111 ..... ..... ..... @vvv
+vmini_bu 0111 00101001 01100 ..... ..... ..... @vv_ui5
+vmini_hu 0111 00101001 01101 ..... ..... ..... @vv_ui5
+vmini_wu 0111 00101001 01110 ..... ..... ..... @vv_ui5
+vmini_du 0111 00101001 01111 ..... ..... ..... @vv_ui5
+
+vmul_b 0111 00001000 01000 ..... ..... ..... @vvv
+vmul_h 0111 00001000 01001 ..... ..... ..... @vvv
+vmul_w 0111 00001000 01010 ..... ..... ..... @vvv
+vmul_d 0111 00001000 01011 ..... ..... ..... @vvv
+vmuh_b 0111 00001000 01100 ..... ..... ..... @vvv
+vmuh_h 0111 00001000 01101 ..... ..... ..... @vvv
+vmuh_w 0111 00001000 01110 ..... ..... ..... @vvv
+vmuh_d 0111 00001000 01111 ..... ..... ..... @vvv
+vmuh_bu 0111 00001000 10000 ..... ..... ..... @vvv
+vmuh_hu 0111 00001000 10001 ..... ..... ..... @vvv
+vmuh_wu 0111 00001000 10010 ..... ..... ..... @vvv
+vmuh_du 0111 00001000 10011 ..... ..... ..... @vvv
+
+vmulwev_h_b 0111 00001001 00000 ..... ..... ..... @vvv
+vmulwev_w_h 0111 00001001 00001 ..... ..... ..... @vvv
+vmulwev_d_w 0111 00001001 00010 ..... ..... ..... @vvv
+vmulwev_q_d 0111 00001001 00011 ..... ..... ..... @vvv
+vmulwod_h_b 0111 00001001 00100 ..... ..... ..... @vvv
+vmulwod_w_h 0111 00001001 00101 ..... ..... ..... @vvv
+vmulwod_d_w 0111 00001001 00110 ..... ..... ..... @vvv
+vmulwod_q_d 0111 00001001 00111 ..... ..... ..... @vvv
+vmulwev_h_bu 0111 00001001 10000 ..... ..... ..... @vvv
+vmulwev_w_hu 0111 00001001 10001 ..... ..... ..... @vvv
+vmulwev_d_wu 0111 00001001 10010 ..... ..... ..... @vvv
+vmulwev_q_du 0111 00001001 10011 ..... ..... ..... @vvv
+vmulwod_h_bu 0111 00001001 10100 ..... ..... ..... @vvv
+vmulwod_w_hu 0111 00001001 10101 ..... ..... ..... @vvv
+vmulwod_d_wu 0111 00001001 10110 ..... ..... ..... @vvv
+vmulwod_q_du 0111 00001001 10111 ..... ..... ..... @vvv
+vmulwev_h_bu_b 0111 00001010 00000 ..... ..... ..... @vvv
+vmulwev_w_hu_h 0111 00001010 00001 ..... ..... ..... @vvv
+vmulwev_d_wu_w 0111 00001010 00010 ..... ..... ..... @vvv
+vmulwev_q_du_d 0111 00001010 00011 ..... ..... ..... @vvv
+vmulwod_h_bu_b 0111 00001010 00100 ..... ..... ..... @vvv
+vmulwod_w_hu_h 0111 00001010 00101 ..... ..... ..... @vvv
+vmulwod_d_wu_w 0111 00001010 00110 ..... ..... ..... @vvv
+vmulwod_q_du_d 0111 00001010 00111 ..... ..... ..... @vvv
+
+vmadd_b 0111 00001010 10000 ..... ..... ..... @vvv
+vmadd_h 0111 00001010 10001 ..... ..... ..... @vvv
+vmadd_w 0111 00001010 10010 ..... ..... ..... @vvv
+vmadd_d 0111 00001010 10011 ..... ..... ..... @vvv
+vmsub_b 0111 00001010 10100 ..... ..... ..... @vvv
+vmsub_h 0111 00001010 10101 ..... ..... ..... @vvv
+vmsub_w 0111 00001010 10110 ..... ..... ..... @vvv
+vmsub_d 0111 00001010 10111 ..... ..... ..... @vvv
+
+vmaddwev_h_b 0111 00001010 11000 ..... ..... ..... @vvv
+vmaddwev_w_h 0111 00001010 11001 ..... ..... ..... @vvv
+vmaddwev_d_w 0111 00001010 11010 ..... ..... ..... @vvv
+vmaddwev_q_d 0111 00001010 11011 ..... ..... ..... @vvv
+vmaddwod_h_b 0111 00001010 11100 ..... ..... ..... @vvv
+vmaddwod_w_h 0111 00001010 11101 ..... ..... ..... @vvv
+vmaddwod_d_w 0111 00001010 11110 ..... ..... ..... @vvv
+vmaddwod_q_d 0111 00001010 11111 ..... ..... ..... @vvv
+vmaddwev_h_bu 0111 00001011 01000 ..... ..... ..... @vvv
+vmaddwev_w_hu 0111 00001011 01001 ..... ..... ..... @vvv
+vmaddwev_d_wu 0111 00001011 01010 ..... ..... ..... @vvv
+vmaddwev_q_du 0111 00001011 01011 ..... ..... ..... @vvv
+vmaddwod_h_bu 0111 00001011 01100 ..... ..... ..... @vvv
+vmaddwod_w_hu 0111 00001011 01101 ..... ..... ..... @vvv
+vmaddwod_d_wu 0111 00001011 01110 ..... ..... ..... @vvv
+vmaddwod_q_du 0111 00001011 01111 ..... ..... ..... @vvv
+vmaddwev_h_bu_b 0111 00001011 11000 ..... ..... ..... @vvv
+vmaddwev_w_hu_h 0111 00001011 11001 ..... ..... ..... @vvv
+vmaddwev_d_wu_w 0111 00001011 11010 ..... ..... ..... @vvv
+vmaddwev_q_du_d 0111 00001011 11011 ..... ..... ..... @vvv
+vmaddwod_h_bu_b 0111 00001011 11100 ..... ..... ..... @vvv
+vmaddwod_w_hu_h 0111 00001011 11101 ..... ..... ..... @vvv
+vmaddwod_d_wu_w 0111 00001011 11110 ..... ..... ..... @vvv
+vmaddwod_q_du_d 0111 00001011 11111 ..... ..... ..... @vvv
+
+vdiv_b 0111 00001110 00000 ..... ..... ..... @vvv
+vdiv_h 0111 00001110 00001 ..... ..... ..... @vvv
+vdiv_w 0111 00001110 00010 ..... ..... ..... @vvv
+vdiv_d 0111 00001110 00011 ..... ..... ..... @vvv
+vdiv_bu 0111 00001110 01000 ..... ..... ..... @vvv
+vdiv_hu 0111 00001110 01001 ..... ..... ..... @vvv
+vdiv_wu 0111 00001110 01010 ..... ..... ..... @vvv
+vdiv_du 0111 00001110 01011 ..... ..... ..... @vvv
+vmod_b 0111 00001110 00100 ..... ..... ..... @vvv
+vmod_h 0111 00001110 00101 ..... ..... ..... @vvv
+vmod_w 0111 00001110 00110 ..... ..... ..... @vvv
+vmod_d 0111 00001110 00111 ..... ..... ..... @vvv
+vmod_bu 0111 00001110 01100 ..... ..... ..... @vvv
+vmod_hu 0111 00001110 01101 ..... ..... ..... @vvv
+vmod_wu 0111 00001110 01110 ..... ..... ..... @vvv
+vmod_du 0111 00001110 01111 ..... ..... ..... @vvv
+
+vsat_b 0111 00110010 01000 01 ... ..... ..... @vv_ui3
+vsat_h 0111 00110010 01000 1 .... ..... ..... @vv_ui4
+vsat_w 0111 00110010 01001 ..... ..... ..... @vv_ui5
+vsat_d 0111 00110010 0101 ...... ..... ..... @vv_ui6
+vsat_bu 0111 00110010 10000 01 ... ..... ..... @vv_ui3
+vsat_hu 0111 00110010 10000 1 .... ..... ..... @vv_ui4
+vsat_wu 0111 00110010 10001 ..... ..... ..... @vv_ui5
+vsat_du 0111 00110010 1001 ...... ..... ..... @vv_ui6
+
+vexth_h_b 0111 00101001 11101 11000 ..... ..... @vv
+vexth_w_h 0111 00101001 11101 11001 ..... ..... @vv
+vexth_d_w 0111 00101001 11101 11010 ..... ..... @vv
+vexth_q_d 0111 00101001 11101 11011 ..... ..... @vv
+vexth_hu_bu 0111 00101001 11101 11100 ..... ..... @vv
+vexth_wu_hu 0111 00101001 11101 11101 ..... ..... @vv
+vexth_du_wu 0111 00101001 11101 11110 ..... ..... @vv
+vexth_qu_du 0111 00101001 11101 11111 ..... ..... @vv
+
+vsigncov_b 0111 00010010 11100 ..... ..... ..... @vvv
+vsigncov_h 0111 00010010 11101 ..... ..... ..... @vvv
+vsigncov_w 0111 00010010 11110 ..... ..... ..... @vvv
+vsigncov_d 0111 00010010 11111 ..... ..... ..... @vvv
+
+vmskltz_b 0111 00101001 11000 10000 ..... ..... @vv
+vmskltz_h 0111 00101001 11000 10001 ..... ..... @vv
+vmskltz_w 0111 00101001 11000 10010 ..... ..... @vv
+vmskltz_d 0111 00101001 11000 10011 ..... ..... @vv
+vmskgez_b 0111 00101001 11000 10100 ..... ..... @vv
+vmsknz_b 0111 00101001 11000 11000 ..... ..... @vv
+
+vldi 0111 00111110 00 ............. ..... @v_i13
+
+vand_v 0111 00010010 01100 ..... ..... ..... @vvv
+vor_v 0111 00010010 01101 ..... ..... ..... @vvv
+vxor_v 0111 00010010 01110 ..... ..... ..... @vvv
+vnor_v 0111 00010010 01111 ..... ..... ..... @vvv
+vandn_v 0111 00010010 10000 ..... ..... ..... @vvv
+vorn_v 0111 00010010 10001 ..... ..... ..... @vvv
+
+vandi_b 0111 00111101 00 ........ ..... ..... @vv_ui8
+vori_b 0111 00111101 01 ........ ..... ..... @vv_ui8
+vxori_b 0111 00111101 10 ........ ..... ..... @vv_ui8
+vnori_b 0111 00111101 11 ........ ..... ..... @vv_ui8
+
+vsll_b 0111 00001110 10000 ..... ..... ..... @vvv
+vsll_h 0111 00001110 10001 ..... ..... ..... @vvv
+vsll_w 0111 00001110 10010 ..... ..... ..... @vvv
+vsll_d 0111 00001110 10011 ..... ..... ..... @vvv
+vslli_b 0111 00110010 11000 01 ... ..... ..... @vv_ui3
+vslli_h 0111 00110010 11000 1 .... ..... ..... @vv_ui4
+vslli_w 0111 00110010 11001 ..... ..... ..... @vv_ui5
+vslli_d 0111 00110010 1101 ...... ..... ..... @vv_ui6
+
+vsrl_b 0111 00001110 10100 ..... ..... ..... @vvv
+vsrl_h 0111 00001110 10101 ..... ..... ..... @vvv
+vsrl_w 0111 00001110 10110 ..... ..... ..... @vvv
+vsrl_d 0111 00001110 10111 ..... ..... ..... @vvv
+vsrli_b 0111 00110011 00000 01 ... ..... ..... @vv_ui3
+vsrli_h 0111 00110011 00000 1 .... ..... ..... @vv_ui4
+vsrli_w 0111 00110011 00001 ..... ..... ..... @vv_ui5
+vsrli_d 0111 00110011 0001 ...... ..... ..... @vv_ui6
+
+vsra_b 0111 00001110 11000 ..... ..... ..... @vvv
+vsra_h 0111 00001110 11001 ..... ..... ..... @vvv
+vsra_w 0111 00001110 11010 ..... ..... ..... @vvv
+vsra_d 0111 00001110 11011 ..... ..... ..... @vvv
+vsrai_b 0111 00110011 01000 01 ... ..... ..... @vv_ui3
+vsrai_h 0111 00110011 01000 1 .... ..... ..... @vv_ui4
+vsrai_w 0111 00110011 01001 ..... ..... ..... @vv_ui5
+vsrai_d 0111 00110011 0101 ...... ..... ..... @vv_ui6
+
+vrotr_b 0111 00001110 11100 ..... ..... ..... @vvv
+vrotr_h 0111 00001110 11101 ..... ..... ..... @vvv
+vrotr_w 0111 00001110 11110 ..... ..... ..... @vvv
+vrotr_d 0111 00001110 11111 ..... ..... ..... @vvv
+vrotri_b 0111 00101010 00000 01 ... ..... ..... @vv_ui3
+vrotri_h 0111 00101010 00000 1 .... ..... ..... @vv_ui4
+vrotri_w 0111 00101010 00001 ..... ..... ..... @vv_ui5
+vrotri_d 0111 00101010 0001 ...... ..... ..... @vv_ui6
+
+vsllwil_h_b 0111 00110000 10000 01 ... ..... ..... @vv_ui3
+vsllwil_w_h 0111 00110000 10000 1 .... ..... ..... @vv_ui4
+vsllwil_d_w 0111 00110000 10001 ..... ..... ..... @vv_ui5
+vextl_q_d 0111 00110000 10010 00000 ..... ..... @vv
+vsllwil_hu_bu 0111 00110000 11000 01 ... ..... ..... @vv_ui3
+vsllwil_wu_hu 0111 00110000 11000 1 .... ..... ..... @vv_ui4
+vsllwil_du_wu 0111 00110000 11001 ..... ..... ..... @vv_ui5
+vextl_qu_du 0111 00110000 11010 00000 ..... ..... @vv
+
+vsrlr_b 0111 00001111 00000 ..... ..... ..... @vvv
+vsrlr_h 0111 00001111 00001 ..... ..... ..... @vvv
+vsrlr_w 0111 00001111 00010 ..... ..... ..... @vvv
+vsrlr_d 0111 00001111 00011 ..... ..... ..... @vvv
+vsrlri_b 0111 00101010 01000 01 ... ..... ..... @vv_ui3
+vsrlri_h 0111 00101010 01000 1 .... ..... ..... @vv_ui4
+vsrlri_w 0111 00101010 01001 ..... ..... ..... @vv_ui5
+vsrlri_d 0111 00101010 0101 ...... ..... ..... @vv_ui6
+
+vsrar_b 0111 00001111 00100 ..... ..... ..... @vvv
+vsrar_h 0111 00001111 00101 ..... ..... ..... @vvv
+vsrar_w 0111 00001111 00110 ..... ..... ..... @vvv
+vsrar_d 0111 00001111 00111 ..... ..... ..... @vvv
+vsrari_b 0111 00101010 10000 01 ... ..... ..... @vv_ui3
+vsrari_h 0111 00101010 10000 1 .... ..... ..... @vv_ui4
+vsrari_w 0111 00101010 10001 ..... ..... ..... @vv_ui5
+vsrari_d 0111 00101010 1001 ...... ..... ..... @vv_ui6
+
+vsrln_b_h 0111 00001111 01001 ..... ..... ..... @vvv
+vsrln_h_w 0111 00001111 01010 ..... ..... ..... @vvv
+vsrln_w_d 0111 00001111 01011 ..... ..... ..... @vvv
+vsran_b_h 0111 00001111 01101 ..... ..... ..... @vvv
+vsran_h_w 0111 00001111 01110 ..... ..... ..... @vvv
+vsran_w_d 0111 00001111 01111 ..... ..... ..... @vvv
+
+vsrlni_b_h 0111 00110100 00000 1 .... ..... ..... @vv_ui4
+vsrlni_h_w 0111 00110100 00001 ..... ..... ..... @vv_ui5
+vsrlni_w_d 0111 00110100 0001 ...... ..... ..... @vv_ui6
+vsrlni_d_q 0111 00110100 001 ....... ..... ..... @vv_ui7
+vsrani_b_h 0111 00110101 10000 1 .... ..... ..... @vv_ui4
+vsrani_h_w 0111 00110101 10001 ..... ..... ..... @vv_ui5
+vsrani_w_d 0111 00110101 1001 ...... ..... ..... @vv_ui6
+vsrani_d_q 0111 00110101 101 ....... ..... ..... @vv_ui7
+
+vsrlrn_b_h 0111 00001111 10001 ..... ..... ..... @vvv
+vsrlrn_h_w 0111 00001111 10010 ..... ..... ..... @vvv
+vsrlrn_w_d 0111 00001111 10011 ..... ..... ..... @vvv
+vsrarn_b_h 0111 00001111 10101 ..... ..... ..... @vvv
+vsrarn_h_w 0111 00001111 10110 ..... ..... ..... @vvv
+vsrarn_w_d 0111 00001111 10111 ..... ..... ..... @vvv
+
+vsrlrni_b_h 0111 00110100 01000 1 .... ..... ..... @vv_ui4
+vsrlrni_h_w 0111 00110100 01001 ..... ..... ..... @vv_ui5
+vsrlrni_w_d 0111 00110100 0101 ...... ..... ..... @vv_ui6
+vsrlrni_d_q 0111 00110100 011 ....... ..... ..... @vv_ui7
+vsrarni_b_h 0111 00110101 11000 1 .... ..... ..... @vv_ui4
+vsrarni_h_w 0111 00110101 11001 ..... ..... ..... @vv_ui5
+vsrarni_w_d 0111 00110101 1101 ...... ..... ..... @vv_ui6
+vsrarni_d_q 0111 00110101 111 ....... ..... ..... @vv_ui7
+
+vssrln_b_h 0111 00001111 11001 ..... ..... ..... @vvv
+vssrln_h_w 0111 00001111 11010 ..... ..... ..... @vvv
+vssrln_w_d 0111 00001111 11011 ..... ..... ..... @vvv
+vssran_b_h 0111 00001111 11101 ..... ..... ..... @vvv
+vssran_h_w 0111 00001111 11110 ..... ..... ..... @vvv
+vssran_w_d 0111 00001111 11111 ..... ..... ..... @vvv
+vssrln_bu_h 0111 00010000 01001 ..... ..... ..... @vvv
+vssrln_hu_w 0111 00010000 01010 ..... ..... ..... @vvv
+vssrln_wu_d 0111 00010000 01011 ..... ..... ..... @vvv
+vssran_bu_h 0111 00010000 01101 ..... ..... ..... @vvv
+vssran_hu_w 0111 00010000 01110 ..... ..... ..... @vvv
+vssran_wu_d 0111 00010000 01111 ..... ..... ..... @vvv
+
+vssrlni_b_h 0111 00110100 10000 1 .... ..... ..... @vv_ui4
+vssrlni_h_w 0111 00110100 10001 ..... ..... ..... @vv_ui5
+vssrlni_w_d 0111 00110100 1001 ...... ..... ..... @vv_ui6
+vssrlni_d_q 0111 00110100 101 ....... ..... ..... @vv_ui7
+vssrani_b_h 0111 00110110 00000 1 .... ..... ..... @vv_ui4
+vssrani_h_w 0111 00110110 00001 ..... ..... ..... @vv_ui5
+vssrani_w_d 0111 00110110 0001 ...... ..... ..... @vv_ui6
+vssrani_d_q 0111 00110110 001 ....... ..... ..... @vv_ui7
+vssrlni_bu_h 0111 00110100 11000 1 .... ..... ..... @vv_ui4
+vssrlni_hu_w 0111 00110100 11001 ..... ..... ..... @vv_ui5
+vssrlni_wu_d 0111 00110100 1101 ...... ..... ..... @vv_ui6
+vssrlni_du_q 0111 00110100 111 ....... ..... ..... @vv_ui7
+vssrani_bu_h 0111 00110110 01000 1 .... ..... ..... @vv_ui4
+vssrani_hu_w 0111 00110110 01001 ..... ..... ..... @vv_ui5
+vssrani_wu_d 0111 00110110 0101 ...... ..... ..... @vv_ui6
+vssrani_du_q 0111 00110110 011 ....... ..... ..... @vv_ui7
+
+vssrlrn_b_h 0111 00010000 00001 ..... ..... ..... @vvv
+vssrlrn_h_w 0111 00010000 00010 ..... ..... ..... @vvv
+vssrlrn_w_d 0111 00010000 00011 ..... ..... ..... @vvv
+vssrarn_b_h 0111 00010000 00101 ..... ..... ..... @vvv
+vssrarn_h_w 0111 00010000 00110 ..... ..... ..... @vvv
+vssrarn_w_d 0111 00010000 00111 ..... ..... ..... @vvv
+vssrlrn_bu_h 0111 00010000 10001 ..... ..... ..... @vvv
+vssrlrn_hu_w 0111 00010000 10010 ..... ..... ..... @vvv
+vssrlrn_wu_d 0111 00010000 10011 ..... ..... ..... @vvv
+vssrarn_bu_h 0111 00010000 10101 ..... ..... ..... @vvv
+vssrarn_hu_w 0111 00010000 10110 ..... ..... ..... @vvv
+vssrarn_wu_d 0111 00010000 10111 ..... ..... ..... @vvv
+
+vssrlrni_b_h 0111 00110101 00000 1 .... ..... ..... @vv_ui4
+vssrlrni_h_w 0111 00110101 00001 ..... ..... ..... @vv_ui5
+vssrlrni_w_d 0111 00110101 0001 ...... ..... ..... @vv_ui6
+vssrlrni_d_q 0111 00110101 001 ....... ..... ..... @vv_ui7
+vssrarni_b_h 0111 00110110 10000 1 .... ..... ..... @vv_ui4
+vssrarni_h_w 0111 00110110 10001 ..... ..... ..... @vv_ui5
+vssrarni_w_d 0111 00110110 1001 ...... ..... ..... @vv_ui6
+vssrarni_d_q 0111 00110110 101 ....... ..... ..... @vv_ui7
+vssrlrni_bu_h 0111 00110101 01000 1 .... ..... ..... @vv_ui4
+vssrlrni_hu_w 0111 00110101 01001 ..... ..... ..... @vv_ui5
+vssrlrni_wu_d 0111 00110101 0101 ...... ..... ..... @vv_ui6
+vssrlrni_du_q 0111 00110101 011 ....... ..... ..... @vv_ui7
+vssrarni_bu_h 0111 00110110 11000 1 .... ..... ..... @vv_ui4
+vssrarni_hu_w 0111 00110110 11001 ..... ..... ..... @vv_ui5
+vssrarni_wu_d 0111 00110110 1101 ...... ..... ..... @vv_ui6
+vssrarni_du_q 0111 00110110 111 ....... ..... ..... @vv_ui7
+
+vclo_b 0111 00101001 11000 00000 ..... ..... @vv
+vclo_h 0111 00101001 11000 00001 ..... ..... @vv
+vclo_w 0111 00101001 11000 00010 ..... ..... @vv
+vclo_d 0111 00101001 11000 00011 ..... ..... @vv
+vclz_b 0111 00101001 11000 00100 ..... ..... @vv
+vclz_h 0111 00101001 11000 00101 ..... ..... @vv
+vclz_w 0111 00101001 11000 00110 ..... ..... @vv
+vclz_d 0111 00101001 11000 00111 ..... ..... @vv
+
+vpcnt_b 0111 00101001 11000 01000 ..... ..... @vv
+vpcnt_h 0111 00101001 11000 01001 ..... ..... @vv
+vpcnt_w 0111 00101001 11000 01010 ..... ..... @vv
+vpcnt_d 0111 00101001 11000 01011 ..... ..... @vv
+
+vbitclr_b 0111 00010000 11000 ..... ..... ..... @vvv
+vbitclr_h 0111 00010000 11001 ..... ..... ..... @vvv
+vbitclr_w 0111 00010000 11010 ..... ..... ..... @vvv
+vbitclr_d 0111 00010000 11011 ..... ..... ..... @vvv
+vbitclri_b 0111 00110001 00000 01 ... ..... ..... @vv_ui3
+vbitclri_h 0111 00110001 00000 1 .... ..... ..... @vv_ui4
+vbitclri_w 0111 00110001 00001 ..... ..... ..... @vv_ui5
+vbitclri_d 0111 00110001 0001 ...... ..... ..... @vv_ui6
+vbitset_b 0111 00010000 11100 ..... ..... ..... @vvv
+vbitset_h 0111 00010000 11101 ..... ..... ..... @vvv
+vbitset_w 0111 00010000 11110 ..... ..... ..... @vvv
+vbitset_d 0111 00010000 11111 ..... ..... ..... @vvv
+vbitseti_b 0111 00110001 01000 01 ... ..... ..... @vv_ui3
+vbitseti_h 0111 00110001 01000 1 .... ..... ..... @vv_ui4
+vbitseti_w 0111 00110001 01001 ..... ..... ..... @vv_ui5
+vbitseti_d 0111 00110001 0101 ...... ..... ..... @vv_ui6
+vbitrev_b 0111 00010001 00000 ..... ..... ..... @vvv
+vbitrev_h 0111 00010001 00001 ..... ..... ..... @vvv
+vbitrev_w 0111 00010001 00010 ..... ..... ..... @vvv
+vbitrev_d 0111 00010001 00011 ..... ..... ..... @vvv
+vbitrevi_b 0111 00110001 10000 01 ... ..... ..... @vv_ui3
+vbitrevi_h 0111 00110001 10000 1 .... ..... ..... @vv_ui4
+vbitrevi_w 0111 00110001 10001 ..... ..... ..... @vv_ui5
+vbitrevi_d 0111 00110001 1001 ...... ..... ..... @vv_ui6
+
+vfrstp_b 0111 00010010 10110 ..... ..... ..... @vvv
+vfrstp_h 0111 00010010 10111 ..... ..... ..... @vvv
+vfrstpi_b 0111 00101001 10100 ..... ..... ..... @vv_ui5
+vfrstpi_h 0111 00101001 10101 ..... ..... ..... @vv_ui5
+
+vfadd_s 0111 00010011 00001 ..... ..... ..... @vvv
+vfadd_d 0111 00010011 00010 ..... ..... ..... @vvv
+vfsub_s 0111 00010011 00101 ..... ..... ..... @vvv
+vfsub_d 0111 00010011 00110 ..... ..... ..... @vvv
+vfmul_s 0111 00010011 10001 ..... ..... ..... @vvv
+vfmul_d 0111 00010011 10010 ..... ..... ..... @vvv
+vfdiv_s 0111 00010011 10101 ..... ..... ..... @vvv
+vfdiv_d 0111 00010011 10110 ..... ..... ..... @vvv
+
+vfmadd_s 0000 10010001 ..... ..... ..... ..... @vvvv
+vfmadd_d 0000 10010010 ..... ..... ..... ..... @vvvv
+vfmsub_s 0000 10010101 ..... ..... ..... ..... @vvvv
+vfmsub_d 0000 10010110 ..... ..... ..... ..... @vvvv
+vfnmadd_s 0000 10011001 ..... ..... ..... ..... @vvvv
+vfnmadd_d 0000 10011010 ..... ..... ..... ..... @vvvv
+vfnmsub_s 0000 10011101 ..... ..... ..... ..... @vvvv
+vfnmsub_d 0000 10011110 ..... ..... ..... ..... @vvvv
+
+vfmax_s 0111 00010011 11001 ..... ..... ..... @vvv
+vfmax_d 0111 00010011 11010 ..... ..... ..... @vvv
+vfmin_s 0111 00010011 11101 ..... ..... ..... @vvv
+vfmin_d 0111 00010011 11110 ..... ..... ..... @vvv
+
+vfmaxa_s 0111 00010100 00001 ..... ..... ..... @vvv
+vfmaxa_d 0111 00010100 00010 ..... ..... ..... @vvv
+vfmina_s 0111 00010100 00101 ..... ..... ..... @vvv
+vfmina_d 0111 00010100 00110 ..... ..... ..... @vvv
+
+vflogb_s 0111 00101001 11001 10001 ..... ..... @vv
+vflogb_d 0111 00101001 11001 10010 ..... ..... @vv
+
+vfclass_s 0111 00101001 11001 10101 ..... ..... @vv
+vfclass_d 0111 00101001 11001 10110 ..... ..... @vv
+
+vfsqrt_s 0111 00101001 11001 11001 ..... ..... @vv
+vfsqrt_d 0111 00101001 11001 11010 ..... ..... @vv
+vfrecip_s 0111 00101001 11001 11101 ..... ..... @vv
+vfrecip_d 0111 00101001 11001 11110 ..... ..... @vv
+vfrsqrt_s 0111 00101001 11010 00001 ..... ..... @vv
+vfrsqrt_d 0111 00101001 11010 00010 ..... ..... @vv
+
+vfcvtl_s_h 0111 00101001 11011 11010 ..... ..... @vv
+vfcvth_s_h 0111 00101001 11011 11011 ..... ..... @vv
+vfcvtl_d_s 0111 00101001 11011 11100 ..... ..... @vv
+vfcvth_d_s 0111 00101001 11011 11101 ..... ..... @vv
+vfcvt_h_s 0111 00010100 01100 ..... ..... ..... @vvv
+vfcvt_s_d 0111 00010100 01101 ..... ..... ..... @vvv
+
+vfrint_s 0111 00101001 11010 01101 ..... ..... @vv
+vfrint_d 0111 00101001 11010 01110 ..... ..... @vv
+vfrintrm_s 0111 00101001 11010 10001 ..... ..... @vv
+vfrintrm_d 0111 00101001 11010 10010 ..... ..... @vv
+vfrintrp_s 0111 00101001 11010 10101 ..... ..... @vv
+vfrintrp_d 0111 00101001 11010 10110 ..... ..... @vv
+vfrintrz_s 0111 00101001 11010 11001 ..... ..... @vv
+vfrintrz_d 0111 00101001 11010 11010 ..... ..... @vv
+vfrintrne_s 0111 00101001 11010 11101 ..... ..... @vv
+vfrintrne_d 0111 00101001 11010 11110 ..... ..... @vv
+
+vftint_w_s 0111 00101001 11100 01100 ..... ..... @vv
+vftint_l_d 0111 00101001 11100 01101 ..... ..... @vv
+vftintrm_w_s 0111 00101001 11100 01110 ..... ..... @vv
+vftintrm_l_d 0111 00101001 11100 01111 ..... ..... @vv
+vftintrp_w_s 0111 00101001 11100 10000 ..... ..... @vv
+vftintrp_l_d 0111 00101001 11100 10001 ..... ..... @vv
+vftintrz_w_s 0111 00101001 11100 10010 ..... ..... @vv
+vftintrz_l_d 0111 00101001 11100 10011 ..... ..... @vv
+vftintrne_w_s 0111 00101001 11100 10100 ..... ..... @vv
+vftintrne_l_d 0111 00101001 11100 10101 ..... ..... @vv
+vftint_wu_s 0111 00101001 11100 10110 ..... ..... @vv
+vftint_lu_d 0111 00101001 11100 10111 ..... ..... @vv
+vftintrz_wu_s 0111 00101001 11100 11100 ..... ..... @vv
+vftintrz_lu_d 0111 00101001 11100 11101 ..... ..... @vv
+vftint_w_d 0111 00010100 10011 ..... ..... ..... @vvv
+vftintrm_w_d 0111 00010100 10100 ..... ..... ..... @vvv
+vftintrp_w_d 0111 00010100 10101 ..... ..... ..... @vvv
+vftintrz_w_d 0111 00010100 10110 ..... ..... ..... @vvv
+vftintrne_w_d 0111 00010100 10111 ..... ..... ..... @vvv
+vftintl_l_s 0111 00101001 11101 00000 ..... ..... @vv
+vftinth_l_s 0111 00101001 11101 00001 ..... ..... @vv
+vftintrml_l_s 0111 00101001 11101 00010 ..... ..... @vv
+vftintrmh_l_s 0111 00101001 11101 00011 ..... ..... @vv
+vftintrpl_l_s 0111 00101001 11101 00100 ..... ..... @vv
+vftintrph_l_s 0111 00101001 11101 00101 ..... ..... @vv
+vftintrzl_l_s 0111 00101001 11101 00110 ..... ..... @vv
+vftintrzh_l_s 0111 00101001 11101 00111 ..... ..... @vv
+vftintrnel_l_s 0111 00101001 11101 01000 ..... ..... @vv
+vftintrneh_l_s 0111 00101001 11101 01001 ..... ..... @vv
+
+vffint_s_w 0111 00101001 11100 00000 ..... ..... @vv
+vffint_s_wu 0111 00101001 11100 00001 ..... ..... @vv
+vffint_d_l 0111 00101001 11100 00010 ..... ..... @vv
+vffint_d_lu 0111 00101001 11100 00011 ..... ..... @vv
+vffintl_d_w 0111 00101001 11100 00100 ..... ..... @vv
+vffinth_d_w 0111 00101001 11100 00101 ..... ..... @vv
+vffint_s_l 0111 00010100 10000 ..... ..... ..... @vvv
+
+vseq_b 0111 00000000 00000 ..... ..... ..... @vvv
+vseq_h 0111 00000000 00001 ..... ..... ..... @vvv
+vseq_w 0111 00000000 00010 ..... ..... ..... @vvv
+vseq_d 0111 00000000 00011 ..... ..... ..... @vvv
+vseqi_b 0111 00101000 00000 ..... ..... ..... @vv_i5
+vseqi_h 0111 00101000 00001 ..... ..... ..... @vv_i5
+vseqi_w 0111 00101000 00010 ..... ..... ..... @vv_i5
+vseqi_d 0111 00101000 00011 ..... ..... ..... @vv_i5
+
+vsle_b 0111 00000000 00100 ..... ..... ..... @vvv
+vsle_h 0111 00000000 00101 ..... ..... ..... @vvv
+vsle_w 0111 00000000 00110 ..... ..... ..... @vvv
+vsle_d 0111 00000000 00111 ..... ..... ..... @vvv
+vslei_b 0111 00101000 00100 ..... ..... ..... @vv_i5
+vslei_h 0111 00101000 00101 ..... ..... ..... @vv_i5
+vslei_w 0111 00101000 00110 ..... ..... ..... @vv_i5
+vslei_d 0111 00101000 00111 ..... ..... ..... @vv_i5
+vsle_bu 0111 00000000 01000 ..... ..... ..... @vvv
+vsle_hu 0111 00000000 01001 ..... ..... ..... @vvv
+vsle_wu 0111 00000000 01010 ..... ..... ..... @vvv
+vsle_du 0111 00000000 01011 ..... ..... ..... @vvv
+vslei_bu 0111 00101000 01000 ..... ..... ..... @vv_ui5
+vslei_hu 0111 00101000 01001 ..... ..... ..... @vv_ui5
+vslei_wu 0111 00101000 01010 ..... ..... ..... @vv_ui5
+vslei_du 0111 00101000 01011 ..... ..... ..... @vv_ui5
+
+vslt_b 0111 00000000 01100 ..... ..... ..... @vvv
+vslt_h 0111 00000000 01101 ..... ..... ..... @vvv
+vslt_w 0111 00000000 01110 ..... ..... ..... @vvv
+vslt_d 0111 00000000 01111 ..... ..... ..... @vvv
+vslti_b 0111 00101000 01100 ..... ..... ..... @vv_i5
+vslti_h 0111 00101000 01101 ..... ..... ..... @vv_i5
+vslti_w 0111 00101000 01110 ..... ..... ..... @vv_i5
+vslti_d 0111 00101000 01111 ..... ..... ..... @vv_i5
+vslt_bu 0111 00000000 10000 ..... ..... ..... @vvv
+vslt_hu 0111 00000000 10001 ..... ..... ..... @vvv
+vslt_wu 0111 00000000 10010 ..... ..... ..... @vvv
+vslt_du 0111 00000000 10011 ..... ..... ..... @vvv
+vslti_bu 0111 00101000 10000 ..... ..... ..... @vv_ui5
+vslti_hu 0111 00101000 10001 ..... ..... ..... @vv_ui5
+vslti_wu 0111 00101000 10010 ..... ..... ..... @vv_ui5
+vslti_du 0111 00101000 10011 ..... ..... ..... @vv_ui5
+
+vfcmp_cond_s 0000 11000101 ..... ..... ..... ..... @vvv_fcond
+vfcmp_cond_d 0000 11000110 ..... ..... ..... ..... @vvv_fcond
+
+vbitsel_v 0000 11010001 ..... ..... ..... ..... @vvvv
+
+vbitseli_b 0111 00111100 01 ........ ..... ..... @vv_ui8
+
+vseteqz_v 0111 00101001 11001 00110 ..... 00 ... @cv
+vsetnez_v 0111 00101001 11001 00111 ..... 00 ... @cv
+vsetanyeqz_b 0111 00101001 11001 01000 ..... 00 ... @cv
+vsetanyeqz_h 0111 00101001 11001 01001 ..... 00 ... @cv
+vsetanyeqz_w 0111 00101001 11001 01010 ..... 00 ... @cv
+vsetanyeqz_d 0111 00101001 11001 01011 ..... 00 ... @cv
+vsetallnez_b 0111 00101001 11001 01100 ..... 00 ... @cv
+vsetallnez_h 0111 00101001 11001 01101 ..... 00 ... @cv
+vsetallnez_w 0111 00101001 11001 01110 ..... 00 ... @cv
+vsetallnez_d 0111 00101001 11001 01111 ..... 00 ... @cv
+
+vinsgr2vr_b 0111 00101110 10111 0 .... ..... ..... @vr_ui4
+vinsgr2vr_h 0111 00101110 10111 10 ... ..... ..... @vr_ui3
+vinsgr2vr_w 0111 00101110 10111 110 .. ..... ..... @vr_ui2
+vinsgr2vr_d 0111 00101110 10111 1110 . ..... ..... @vr_ui1
+vpickve2gr_b 0111 00101110 11111 0 .... ..... ..... @rv_ui4
+vpickve2gr_h 0111 00101110 11111 10 ... ..... ..... @rv_ui3
+vpickve2gr_w 0111 00101110 11111 110 .. ..... ..... @rv_ui2
+vpickve2gr_d 0111 00101110 11111 1110 . ..... ..... @rv_ui1
+vpickve2gr_bu 0111 00101111 00111 0 .... ..... ..... @rv_ui4
+vpickve2gr_hu 0111 00101111 00111 10 ... ..... ..... @rv_ui3
+vpickve2gr_wu 0111 00101111 00111 110 .. ..... ..... @rv_ui2
+vpickve2gr_du 0111 00101111 00111 1110 . ..... ..... @rv_ui1
+
+vreplgr2vr_b 0111 00101001 11110 00000 ..... ..... @vr
+vreplgr2vr_h 0111 00101001 11110 00001 ..... ..... @vr
+vreplgr2vr_w 0111 00101001 11110 00010 ..... ..... @vr
+vreplgr2vr_d 0111 00101001 11110 00011 ..... ..... @vr
+
+vreplve_b 0111 00010010 00100 ..... ..... ..... @vvr
+vreplve_h 0111 00010010 00101 ..... ..... ..... @vvr
+vreplve_w 0111 00010010 00110 ..... ..... ..... @vvr
+vreplve_d 0111 00010010 00111 ..... ..... ..... @vvr
+vreplvei_b 0111 00101111 01111 0 .... ..... ..... @vv_ui4
+vreplvei_h 0111 00101111 01111 10 ... ..... ..... @vv_ui3
+vreplvei_w 0111 00101111 01111 110 .. ..... ..... @vv_ui2
+vreplvei_d 0111 00101111 01111 1110 . ..... ..... @vv_ui1
+
+vbsll_v 0111 00101000 11100 ..... ..... ..... @vv_ui5
+vbsrl_v 0111 00101000 11101 ..... ..... ..... @vv_ui5
+
+vpackev_b 0111 00010001 01100 ..... ..... ..... @vvv
+vpackev_h 0111 00010001 01101 ..... ..... ..... @vvv
+vpackev_w 0111 00010001 01110 ..... ..... ..... @vvv
+vpackev_d 0111 00010001 01111 ..... ..... ..... @vvv
+vpackod_b 0111 00010001 10000 ..... ..... ..... @vvv
+vpackod_h 0111 00010001 10001 ..... ..... ..... @vvv
+vpackod_w 0111 00010001 10010 ..... ..... ..... @vvv
+vpackod_d 0111 00010001 10011 ..... ..... ..... @vvv
+
+vpickev_b 0111 00010001 11100 ..... ..... ..... @vvv
+vpickev_h 0111 00010001 11101 ..... ..... ..... @vvv
+vpickev_w 0111 00010001 11110 ..... ..... ..... @vvv
+vpickev_d 0111 00010001 11111 ..... ..... ..... @vvv
+vpickod_b 0111 00010010 00000 ..... ..... ..... @vvv
+vpickod_h 0111 00010010 00001 ..... ..... ..... @vvv
+vpickod_w 0111 00010010 00010 ..... ..... ..... @vvv
+vpickod_d 0111 00010010 00011 ..... ..... ..... @vvv
+
+vilvl_b 0111 00010001 10100 ..... ..... ..... @vvv
+vilvl_h 0111 00010001 10101 ..... ..... ..... @vvv
+vilvl_w 0111 00010001 10110 ..... ..... ..... @vvv
+vilvl_d 0111 00010001 10111 ..... ..... ..... @vvv
+vilvh_b 0111 00010001 11000 ..... ..... ..... @vvv
+vilvh_h 0111 00010001 11001 ..... ..... ..... @vvv
+vilvh_w 0111 00010001 11010 ..... ..... ..... @vvv
+vilvh_d 0111 00010001 11011 ..... ..... ..... @vvv
+
+vshuf_b 0000 11010101 ..... ..... ..... ..... @vvvv
+vshuf_h 0111 00010111 10101 ..... ..... ..... @vvv
+vshuf_w 0111 00010111 10110 ..... ..... ..... @vvv
+vshuf_d 0111 00010111 10111 ..... ..... ..... @vvv
+vshuf4i_b 0111 00111001 00 ........ ..... ..... @vv_ui8
+vshuf4i_h 0111 00111001 01 ........ ..... ..... @vv_ui8
+vshuf4i_w 0111 00111001 10 ........ ..... ..... @vv_ui8
+vshuf4i_d 0111 00111001 11 ........ ..... ..... @vv_ui8
+
+vpermi_w 0111 00111110 01 ........ ..... ..... @vv_ui8
+
+vextrins_d 0111 00111000 00 ........ ..... ..... @vv_ui8
+vextrins_w 0111 00111000 01 ........ ..... ..... @vv_ui8
+vextrins_h 0111 00111000 10 ........ ..... ..... @vv_ui8
+vextrins_b 0111 00111000 11 ........ ..... ..... @vv_ui8
+
+vld 0010 110000 ............ ..... ..... @vr_i12
+vst 0010 110001 ............ ..... ..... @vr_i12
+vldx 0011 10000100 00000 ..... ..... ..... @vrr
+vstx 0011 10000100 01000 ..... ..... ..... @vrr
+
+vldrepl_d 0011 00000001 0 ......... ..... ..... @vr_i9
+vldrepl_w 0011 00000010 .......... ..... ..... @vr_i10
+vldrepl_h 0011 0000010 ........... ..... ..... @vr_i11
+vldrepl_b 0011 000010 ............ ..... ..... @vr_i12
+vstelm_d 0011 00010001 0 . ........ ..... ..... @vr_i8i1
+vstelm_w 0011 00010010 .. ........ ..... ..... @vr_i8i2
+vstelm_h 0011 0001010 ... ........ ..... ..... @vr_i8i3
+vstelm_b 0011 000110 .... ........ ..... ..... @vr_i8i4
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
index f01635a..7b0f29c 100644
--- a/target/loongarch/internals.h
+++ b/target/loongarch/internals.h
@@ -21,6 +21,28 @@
/* Global bit for huge page */
#define LOONGARCH_HGLOBAL_SHIFT 12
+#if HOST_BIG_ENDIAN
+#define B(x) B[15 - (x)]
+#define H(x) H[7 - (x)]
+#define W(x) W[3 - (x)]
+#define D(x) D[1 - (x)]
+#define UB(x) UB[15 - (x)]
+#define UH(x) UH[7 - (x)]
+#define UW(x) UW[3 - (x)]
+#define UD(x) UD[1 - (x)]
+#define Q(x) Q[x]
+#else
+#define B(x) B[x]
+#define H(x) H[x]
+#define W(x) W[x]
+#define D(x) D[x]
+#define UB(x) UB[x]
+#define UH(x) UH[x]
+#define UW(x) UW[x]
+#define UD(x) UD[x]
+#define Q(x) Q[x]
+#endif
+
void loongarch_translate_init(void);
void loongarch_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
@@ -31,6 +53,7 @@ void G_NORETURN do_raise_exception(CPULoongArchState *env,
const char *loongarch_exception_name(int32_t exception);
+int ieee_ex_to_loongarch(int xcpt);
void restore_fp_status(CPULoongArchState *env);
#ifndef CONFIG_USER_ONLY
diff --git a/target/loongarch/lsx_helper.c b/target/loongarch/lsx_helper.c
new file mode 100644
index 0000000..9571f0a
--- /dev/null
+++ b/target/loongarch/lsx_helper.c
@@ -0,0 +1,3004 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QEMU LoongArch LSX helper functions.
+ *
+ * Copyright (c) 2022-2023 Loongson Technology Corporation Limited
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+#include "internals.h"
+#include "tcg/tcg.h"
+
+#define DO_ADD(a, b) (a + b)
+#define DO_SUB(a, b) (a - b)
+
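+/*
+ * Horizontal widening add/sub: each result element combines the odd source
+ * element of Vj with the even source element of Vk.
+ */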
+#define DO_ODD_EVEN(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ typedef __typeof(Vd->E1(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i)); \
+ } \
+}
+
+DO_ODD_EVEN(vhaddw_h_b, 16, H, B, DO_ADD)
+DO_ODD_EVEN(vhaddw_w_h, 32, W, H, DO_ADD)
+DO_ODD_EVEN(vhaddw_d_w, 64, D, W, DO_ADD)
+
+void HELPER(vhaddw_q_d)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0)));
+}
+
+DO_ODD_EVEN(vhsubw_h_b, 16, H, B, DO_SUB)
+DO_ODD_EVEN(vhsubw_w_h, 32, W, H, DO_SUB)
+DO_ODD_EVEN(vhsubw_d_w, 64, D, W, DO_SUB)
+
+void HELPER(vhsubw_q_d)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0)));
+}
+
+DO_ODD_EVEN(vhaddw_hu_bu, 16, UH, UB, DO_ADD)
+DO_ODD_EVEN(vhaddw_wu_hu, 32, UW, UH, DO_ADD)
+DO_ODD_EVEN(vhaddw_du_wu, 64, UD, UW, DO_ADD)
+
+void HELPER(vhaddw_qu_du)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
+ int128_make64((uint64_t)Vk->D(0)));
+}
+
+DO_ODD_EVEN(vhsubw_hu_bu, 16, UH, UB, DO_SUB)
+DO_ODD_EVEN(vhsubw_wu_hu, 32, UW, UH, DO_SUB)
+DO_ODD_EVEN(vhsubw_du_wu, 64, UD, UW, DO_SUB)
+
+void HELPER(vhsubw_qu_du)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)),
+ int128_make64((uint64_t)Vk->D(0)));
+}
+
+#define DO_EVEN(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->E1(0)) TD; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i));           \
+ } \
+}
+
+#define DO_ODD(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->E1(0)) TD; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i + 1)); \
+ } \
+}
+
+void HELPER(vaddwev_q_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_add(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0)));
+}
+
+DO_EVEN(vaddwev_h_b, 16, H, B, DO_ADD)
+DO_EVEN(vaddwev_w_h, 32, W, H, DO_ADD)
+DO_EVEN(vaddwev_d_w, 64, D, W, DO_ADD)
+
+void HELPER(vaddwod_q_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1)));
+}
+
+DO_ODD(vaddwod_h_b, 16, H, B, DO_ADD)
+DO_ODD(vaddwod_w_h, 32, W, H, DO_ADD)
+DO_ODD(vaddwod_d_w, 64, D, W, DO_ADD)
+
+void HELPER(vsubwev_q_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_sub(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0)));
+}
+
+DO_EVEN(vsubwev_h_b, 16, H, B, DO_SUB)
+DO_EVEN(vsubwev_w_h, 32, W, H, DO_SUB)
+DO_EVEN(vsubwev_d_w, 64, D, W, DO_SUB)
+
+void HELPER(vsubwod_q_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1)));
+}
+
+DO_ODD(vsubwod_h_b, 16, H, B, DO_SUB)
+DO_ODD(vsubwod_w_h, 32, W, H, DO_SUB)
+DO_ODD(vsubwod_d_w, 64, D, W, DO_SUB)
+
+void HELPER(vaddwev_q_du)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)),
+ int128_make64((uint64_t)Vk->D(0)));
+}
+
+DO_EVEN(vaddwev_h_bu, 16, UH, UB, DO_ADD)
+DO_EVEN(vaddwev_w_hu, 32, UW, UH, DO_ADD)
+DO_EVEN(vaddwev_d_wu, 64, UD, UW, DO_ADD)
+
+void HELPER(vaddwod_q_du)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
+ int128_make64((uint64_t)Vk->D(1)));
+}
+
+DO_ODD(vaddwod_h_bu, 16, UH, UB, DO_ADD)
+DO_ODD(vaddwod_w_hu, 32, UW, UH, DO_ADD)
+DO_ODD(vaddwod_d_wu, 64, UD, UW, DO_ADD)
+
+void HELPER(vsubwev_q_du)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(0)),
+ int128_make64((uint64_t)Vk->D(0)));
+}
+
+DO_EVEN(vsubwev_h_bu, 16, UH, UB, DO_SUB)
+DO_EVEN(vsubwev_w_hu, 32, UW, UH, DO_SUB)
+DO_EVEN(vsubwev_d_wu, 64, UD, UW, DO_SUB)
+
+void HELPER(vsubwod_q_du)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)),
+ int128_make64((uint64_t)Vk->D(1)));
+}
+
+DO_ODD(vsubwod_h_bu, 16, UH, UB, DO_SUB)
+DO_ODD(vsubwod_w_hu, 32, UW, UH, DO_SUB)
+DO_ODD(vsubwod_d_wu, 64, UD, UW, DO_SUB)
+
+#define DO_EVEN_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->ES1(0)) TDS; \
+ typedef __typeof(Vd->EU1(0)) TDU; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i), (TDS)Vk->ES2(2 * i));      \
+ } \
+}
+
+#define DO_ODD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->ES1(0)) TDS; \
+ typedef __typeof(Vd->EU1(0)) TDU; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i + 1), (TDS)Vk->ES2(2 * i + 1)); \
+ } \
+}
+
+void HELPER(vaddwev_q_du_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)),
+ int128_makes64(Vk->D(0)));
+}
+
+DO_EVEN_U_S(vaddwev_h_bu_b, 16, H, UH, B, UB, DO_ADD)
+DO_EVEN_U_S(vaddwev_w_hu_h, 32, W, UW, H, UH, DO_ADD)
+DO_EVEN_U_S(vaddwev_d_wu_w, 64, D, UD, W, UW, DO_ADD)
+
+void HELPER(vaddwod_q_du_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
+ int128_makes64(Vk->D(1)));
+}
+
+DO_ODD_U_S(vaddwod_h_bu_b, 16, H, UH, B, UB, DO_ADD)
+DO_ODD_U_S(vaddwod_w_hu_h, 32, W, UW, H, UH, DO_ADD)
+DO_ODD_U_S(vaddwod_d_wu_w, 64, D, UD, W, UW, DO_ADD)
+
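+/*
+ * Overflow-safe averaging: (a >> 1) + (b >> 1) plus the carry out of the
+ * two low bits gives floor((a + b) / 2); DO_VAVGR uses (a | b) & 1 instead,
+ * which rounds the average up.
+ */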
+#define DO_VAVG(a, b) ((a >> 1) + (b >> 1) + (a & b & 1))
+#define DO_VAVGR(a, b) ((a >> 1) + (b >> 1) + ((a | b) & 1))
+
+#define DO_3OP(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i)); \
+ } \
+}
+
+DO_3OP(vavg_b, 8, B, DO_VAVG)
+DO_3OP(vavg_h, 16, H, DO_VAVG)
+DO_3OP(vavg_w, 32, W, DO_VAVG)
+DO_3OP(vavg_d, 64, D, DO_VAVG)
+DO_3OP(vavgr_b, 8, B, DO_VAVGR)
+DO_3OP(vavgr_h, 16, H, DO_VAVGR)
+DO_3OP(vavgr_w, 32, W, DO_VAVGR)
+DO_3OP(vavgr_d, 64, D, DO_VAVGR)
+DO_3OP(vavg_bu, 8, UB, DO_VAVG)
+DO_3OP(vavg_hu, 16, UH, DO_VAVG)
+DO_3OP(vavg_wu, 32, UW, DO_VAVG)
+DO_3OP(vavg_du, 64, UD, DO_VAVG)
+DO_3OP(vavgr_bu, 8, UB, DO_VAVGR)
+DO_3OP(vavgr_hu, 16, UH, DO_VAVGR)
+DO_3OP(vavgr_wu, 32, UW, DO_VAVGR)
+DO_3OP(vavgr_du, 64, UD, DO_VAVGR)
+
+#define DO_VABSD(a, b)  ((a > b) ? (a - b) : (b - a))
+
+DO_3OP(vabsd_b, 8, B, DO_VABSD)
+DO_3OP(vabsd_h, 16, H, DO_VABSD)
+DO_3OP(vabsd_w, 32, W, DO_VABSD)
+DO_3OP(vabsd_d, 64, D, DO_VABSD)
+DO_3OP(vabsd_bu, 8, UB, DO_VABSD)
+DO_3OP(vabsd_hu, 16, UH, DO_VABSD)
+DO_3OP(vabsd_wu, 32, UW, DO_VABSD)
+DO_3OP(vabsd_du, 64, UD, DO_VABSD)
+
+#define DO_VABS(a) ((a < 0) ? (-a) : (a))
+
+#define DO_VADDA(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i)) + DO_OP(Vk->E(i)); \
+ } \
+}
+
+DO_VADDA(vadda_b, 8, B, DO_VABS)
+DO_VADDA(vadda_h, 16, H, DO_VABS)
+DO_VADDA(vadda_w, 32, W, DO_VABS)
+DO_VADDA(vadda_d, 64, D, DO_VABS)
+
+#define DO_MIN(a, b) (a < b ? a : b)
+#define DO_MAX(a, b) (a > b ? a : b)
+
+#define VMINMAXI(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ typedef __typeof(Vd->E(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i), (TD)imm); \
+ } \
+}
+
+VMINMAXI(vmini_b, 8, B, DO_MIN)
+VMINMAXI(vmini_h, 16, H, DO_MIN)
+VMINMAXI(vmini_w, 32, W, DO_MIN)
+VMINMAXI(vmini_d, 64, D, DO_MIN)
+VMINMAXI(vmaxi_b, 8, B, DO_MAX)
+VMINMAXI(vmaxi_h, 16, H, DO_MAX)
+VMINMAXI(vmaxi_w, 32, W, DO_MAX)
+VMINMAXI(vmaxi_d, 64, D, DO_MAX)
+VMINMAXI(vmini_bu, 8, UB, DO_MIN)
+VMINMAXI(vmini_hu, 16, UH, DO_MIN)
+VMINMAXI(vmini_wu, 32, UW, DO_MIN)
+VMINMAXI(vmini_du, 64, UD, DO_MIN)
+VMINMAXI(vmaxi_bu, 8, UB, DO_MAX)
+VMINMAXI(vmaxi_hu, 16, UH, DO_MAX)
+VMINMAXI(vmaxi_wu, 32, UW, DO_MAX)
+VMINMAXI(vmaxi_du, 64, UD, DO_MAX)
+
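+/*
+ * DO_VMUH: multiply in the doubled-width type E1 and keep only the high
+ * BIT bits of the product; the 64-bit variants below use muls64/mulu64
+ * since no wider host integer type is available.
+ */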
+#define DO_VMUH(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->E1(0)) T; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E2(i) = ((T)Vj->E2(i)) * ((T)Vk->E2(i)) >> BIT; \
+ } \
+}
+
+void HELPER(vmuh_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ uint64_t l, h1, h2;
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ muls64(&l, &h1, Vj->D(0), Vk->D(0));
+ muls64(&l, &h2, Vj->D(1), Vk->D(1));
+
+ Vd->D(0) = h1;
+ Vd->D(1) = h2;
+}
+
+DO_VMUH(vmuh_b, 8, H, B, DO_MUH)
+DO_VMUH(vmuh_h, 16, W, H, DO_MUH)
+DO_VMUH(vmuh_w, 32, D, W, DO_MUH)
+
+void HELPER(vmuh_du)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ uint64_t l, h1, h2;
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ mulu64(&l, &h1, Vj->D(0), Vk->D(0));
+ mulu64(&l, &h2, Vj->D(1), Vk->D(1));
+
+ Vd->D(0) = h1;
+ Vd->D(1) = h2;
+}
+
+DO_VMUH(vmuh_bu, 8, UH, UB, DO_MUH)
+DO_VMUH(vmuh_hu, 16, UW, UH, DO_MUH)
+DO_VMUH(vmuh_wu, 32, UD, UW, DO_MUH)
+
+#define DO_MUL(a, b) (a * b)
+
+DO_EVEN(vmulwev_h_b, 16, H, B, DO_MUL)
+DO_EVEN(vmulwev_w_h, 32, W, H, DO_MUL)
+DO_EVEN(vmulwev_d_w, 64, D, W, DO_MUL)
+
+DO_ODD(vmulwod_h_b, 16, H, B, DO_MUL)
+DO_ODD(vmulwod_w_h, 32, W, H, DO_MUL)
+DO_ODD(vmulwod_d_w, 64, D, W, DO_MUL)
+
+DO_EVEN(vmulwev_h_bu, 16, UH, UB, DO_MUL)
+DO_EVEN(vmulwev_w_hu, 32, UW, UH, DO_MUL)
+DO_EVEN(vmulwev_d_wu, 64, UD, UW, DO_MUL)
+
+DO_ODD(vmulwod_h_bu, 16, UH, UB, DO_MUL)
+DO_ODD(vmulwod_w_hu, 32, UW, UH, DO_MUL)
+DO_ODD(vmulwod_d_wu, 64, UD, UW, DO_MUL)
+
+DO_EVEN_U_S(vmulwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
+DO_EVEN_U_S(vmulwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
+DO_EVEN_U_S(vmulwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)
+
+DO_ODD_U_S(vmulwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
+DO_ODD_U_S(vmulwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
+DO_ODD_U_S(vmulwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)
+
+#define DO_MADD(a, b, c) (a + b * c)
+#define DO_MSUB(a, b, c) (a - b * c)
+
+#define VMADDSUB(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+        Vd->E(i) = DO_OP(Vd->E(i), Vj->E(i), Vk->E(i));                    \
+ } \
+}
+
+VMADDSUB(vmadd_b, 8, B, DO_MADD)
+VMADDSUB(vmadd_h, 16, H, DO_MADD)
+VMADDSUB(vmadd_w, 32, W, DO_MADD)
+VMADDSUB(vmadd_d, 64, D, DO_MADD)
+VMADDSUB(vmsub_b, 8, B, DO_MSUB)
+VMADDSUB(vmsub_h, 16, H, DO_MSUB)
+VMADDSUB(vmsub_w, 32, W, DO_MSUB)
+VMADDSUB(vmsub_d, 64, D, DO_MSUB)
+
+#define VMADDWEV(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->E1(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
+ } \
+}
+
+VMADDWEV(vmaddwev_h_b, 16, H, B, DO_MUL)
+VMADDWEV(vmaddwev_w_h, 32, W, H, DO_MUL)
+VMADDWEV(vmaddwev_d_w, 64, D, W, DO_MUL)
+VMADDWEV(vmaddwev_h_bu, 16, UH, UB, DO_MUL)
+VMADDWEV(vmaddwev_w_hu, 32, UW, UH, DO_MUL)
+VMADDWEV(vmaddwev_d_wu, 64, UD, UW, DO_MUL)
+
+#define VMADDWOD(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->E1(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i + 1), \
+ (TD)Vk->E2(2 * i + 1)); \
+ } \
+}
+
+VMADDWOD(vmaddwod_h_b, 16, H, B, DO_MUL)
+VMADDWOD(vmaddwod_w_h, 32, W, H, DO_MUL)
+VMADDWOD(vmaddwod_d_w, 64, D, W, DO_MUL)
+VMADDWOD(vmaddwod_h_bu, 16, UH, UB, DO_MUL)
+VMADDWOD(vmaddwod_w_hu, 32, UW, UH, DO_MUL)
+VMADDWOD(vmaddwod_d_wu, 64, UD, UW, DO_MUL)
+
+#define VMADDWEV_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->ES1(0)) TS1; \
+ typedef __typeof(Vd->EU1(0)) TU1; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i), \
+ (TS1)Vk->ES2(2 * i)); \
+ } \
+}
+
+VMADDWEV_U_S(vmaddwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
+VMADDWEV_U_S(vmaddwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
+VMADDWEV_U_S(vmaddwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)
+
+#define VMADDWOD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->ES1(0)) TS1; \
+ typedef __typeof(Vd->EU1(0)) TU1; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i + 1), \
+ (TS1)Vk->ES2(2 * i + 1)); \
+ } \
+}
+
+VMADDWOD_U_S(vmaddwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
+VMADDWOD_U_S(vmaddwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
+VMADDWOD_U_S(vmaddwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)
+
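+/*
+ * Division helpers: a zero divisor yields 0, and the overflow case
+ * (minimum dividend divided by -1, caught by N == -N) yields the dividend
+ * for DIV and 0 for REM, avoiding undefined behaviour on the host.
+ */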
+#define DO_DIVU(N, M) (unlikely(M == 0) ? 0 : N / M)
+#define DO_REMU(N, M) (unlikely(M == 0) ? 0 : N % M)
+#define DO_DIV(N, M) (unlikely(M == 0) ? 0 :\
+ unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M)
+#define DO_REM(N, M) (unlikely(M == 0) ? 0 :\
+ unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M)
+
+#define VDIV(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i)); \
+ } \
+}
+
+VDIV(vdiv_b, 8, B, DO_DIV)
+VDIV(vdiv_h, 16, H, DO_DIV)
+VDIV(vdiv_w, 32, W, DO_DIV)
+VDIV(vdiv_d, 64, D, DO_DIV)
+VDIV(vdiv_bu, 8, UB, DO_DIVU)
+VDIV(vdiv_hu, 16, UH, DO_DIVU)
+VDIV(vdiv_wu, 32, UW, DO_DIVU)
+VDIV(vdiv_du, 64, UD, DO_DIVU)
+VDIV(vmod_b, 8, B, DO_REM)
+VDIV(vmod_h, 16, H, DO_REM)
+VDIV(vmod_w, 32, W, DO_REM)
+VDIV(vmod_d, 64, D, DO_REM)
+VDIV(vmod_bu, 8, UB, DO_REMU)
+VDIV(vmod_hu, 16, UH, DO_REMU)
+VDIV(vmod_wu, 32, UW, DO_REMU)
+VDIV(vmod_du, 64, UD, DO_REMU)
+
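+/*
+ * VSAT_S clamps each signed element to the inclusive range [~max, max]
+ * for the max value passed in; VSAT_U only clamps from above.
+ */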
+#define VSAT_S(NAME, BIT, E) \
+void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ typedef __typeof(Vd->E(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : \
+ Vj->E(i) < (TD)~max ? (TD)~max: Vj->E(i); \
+ } \
+}
+
+VSAT_S(vsat_b, 8, B)
+VSAT_S(vsat_h, 16, H)
+VSAT_S(vsat_w, 32, W)
+VSAT_S(vsat_d, 64, D)
+
+#define VSAT_U(NAME, BIT, E) \
+void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ typedef __typeof(Vd->E(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : Vj->E(i); \
+ } \
+}
+
+VSAT_U(vsat_bu, 8, UB)
+VSAT_U(vsat_hu, 16, UH)
+VSAT_U(vsat_wu, 32, UW)
+VSAT_U(vsat_du, 64, UD)
+
+#define VEXTH(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = Vj->E2(i + LSX_LEN/BIT); \
+ } \
+}
+
+void HELPER(vexth_q_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ Vd->Q(0) = int128_makes64(Vj->D(1));
+}
+
+void HELPER(vexth_qu_du)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ Vd->Q(0) = int128_make64((uint64_t)Vj->D(1));
+}
+
+VEXTH(vexth_h_b, 16, H, B)
+VEXTH(vexth_w_h, 32, W, H)
+VEXTH(vexth_d_w, 64, D, W)
+VEXTH(vexth_hu_bu, 16, UH, UB)
+VEXTH(vexth_wu_hu, 32, UW, UH)
+VEXTH(vexth_du_wu, 64, UD, UW)
+
+#define DO_SIGNCOV(a, b) (a == 0 ? 0 : a < 0 ? -b : b)
+
+DO_3OP(vsigncov_b, 8, B, DO_SIGNCOV)
+DO_3OP(vsigncov_h, 16, H, DO_SIGNCOV)
+DO_3OP(vsigncov_w, 32, W, DO_SIGNCOV)
+DO_3OP(vsigncov_d, 64, D, DO_SIGNCOV)
+
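+/*
+ * do_vmskltz_b/h/w: gather the sign bit of every element in a 64-bit
+ * chunk into the low bits of the result; the shift/or cascade folds the
+ * masked sign bits down without a per-element loop.
+ */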
+static uint64_t do_vmskltz_b(int64_t val)
+{
+ uint64_t m = 0x8080808080808080ULL;
+ uint64_t c = val & m;
+ c |= c << 7;
+ c |= c << 14;
+ c |= c << 28;
+ return c >> 56;
+}
+
+void HELPER(vmskltz_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ uint16_t temp = 0;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp = do_vmskltz_b(Vj->D(0));
+ temp |= (do_vmskltz_b(Vj->D(1)) << 8);
+ Vd->D(0) = temp;
+ Vd->D(1) = 0;
+}
+
+static uint64_t do_vmskltz_h(int64_t val)
+{
+ uint64_t m = 0x8000800080008000ULL;
+ uint64_t c = val & m;
+ c |= c << 15;
+ c |= c << 30;
+ return c >> 60;
+}
+
+void HELPER(vmskltz_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ uint16_t temp = 0;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp = do_vmskltz_h(Vj->D(0));
+ temp |= (do_vmskltz_h(Vj->D(1)) << 4);
+ Vd->D(0) = temp;
+ Vd->D(1) = 0;
+}
+
+static uint64_t do_vmskltz_w(int64_t val)
+{
+ uint64_t m = 0x8000000080000000ULL;
+ uint64_t c = val & m;
+ c |= c << 31;
+ return c >> 62;
+}
+
+void HELPER(vmskltz_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ uint16_t temp = 0;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp = do_vmskltz_w(Vj->D(0));
+ temp |= (do_vmskltz_w(Vj->D(1)) << 2);
+ Vd->D(0) = temp;
+ Vd->D(1) = 0;
+}
+
+static uint64_t do_vmskltz_d(int64_t val)
+{
+ return (uint64_t)val >> 63;
+}
+void HELPER(vmskltz_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ uint16_t temp = 0;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp = do_vmskltz_d(Vj->D(0));
+ temp |= (do_vmskltz_d(Vj->D(1)) << 1);
+ Vd->D(0) = temp;
+ Vd->D(1) = 0;
+}
+
+void HELPER(vmskgez_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ uint16_t temp = 0;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp = do_vmskltz_b(Vj->D(0));
+ temp |= (do_vmskltz_b(Vj->D(1)) << 8);
+ Vd->D(0) = (uint16_t)(~temp);
+ Vd->D(1) = 0;
+}
+
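+/*
+ * do_vmskez_b uses the classic zero-byte trick: ~(((a & m) + m) | a | m)
+ * with m = 0x7f7f... sets the top bit of every byte that is zero, and
+ * vmsknz_b inverts the folded mask to report the non-zero bytes.
+ */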
+static uint64_t do_vmskez_b(uint64_t a)
+{
+ uint64_t m = 0x7f7f7f7f7f7f7f7fULL;
+ uint64_t c = ~(((a & m) + m) | a | m);
+ c |= c << 7;
+ c |= c << 14;
+ c |= c << 28;
+ return c >> 56;
+}
+
+void HELPER(vmsknz_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ uint16_t temp = 0;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp = do_vmskez_b(Vj->D(0));
+ temp |= (do_vmskez_b(Vj->D(1)) << 8);
+ Vd->D(0) = (uint16_t)(~temp);
+ Vd->D(1) = 0;
+}
+
+void HELPER(vnori_b)(void *vd, void *vj, uint64_t imm, uint32_t v)
+{
+ int i;
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+
+ for (i = 0; i < LSX_LEN/8; i++) {
+ Vd->B(i) = ~(Vj->B(i) | (uint8_t)imm);
+ }
+}
+
+#define VSLLWIL(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ typedef __typeof(temp.E1(0)) TD; \
+ \
+ temp.D(0) = 0; \
+ temp.D(1) = 0; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = (TD)Vj->E2(i) << (imm % BIT); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vextl_q_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ Vd->Q(0) = int128_makes64(Vj->D(0));
+}
+
+void HELPER(vextl_qu_du)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ Vd->Q(0) = int128_make64(Vj->D(0));
+}
+
+VSLLWIL(vsllwil_h_b, 16, H, B)
+VSLLWIL(vsllwil_w_h, 32, W, H)
+VSLLWIL(vsllwil_d_w, 64, D, W)
+VSLLWIL(vsllwil_hu_bu, 16, UH, UB)
+VSLLWIL(vsllwil_wu_hu, 32, UW, UH)
+VSLLWIL(vsllwil_du_wu, 64, UD, UW)
+
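+/*
+ * Rounding right shifts: do_vsrlr_ and do_vsrar_ add back the last bit
+ * shifted out, so the result is rounded rather than truncated.
+ */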
+#define do_vsrlr(E, T) \
+static T do_vsrlr_ ##E(T s1, int sh) \
+{ \
+ if (sh == 0) { \
+ return s1; \
+ } else { \
+ return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
+ } \
+}
+
+do_vsrlr(B, uint8_t)
+do_vsrlr(H, uint16_t)
+do_vsrlr(W, uint32_t)
+do_vsrlr(D, uint64_t)
+
+#define VSRLR(NAME, BIT, T, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), ((T)Vk->E(i))%BIT); \
+ } \
+}
+
+VSRLR(vsrlr_b, 8, uint8_t, B)
+VSRLR(vsrlr_h, 16, uint16_t, H)
+VSRLR(vsrlr_w, 32, uint32_t, W)
+VSRLR(vsrlr_d, 64, uint64_t, D)
+
+#define VSRLRI(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), imm); \
+ } \
+}
+
+VSRLRI(vsrlri_b, 8, B)
+VSRLRI(vsrlri_h, 16, H)
+VSRLRI(vsrlri_w, 32, W)
+VSRLRI(vsrlri_d, 64, D)
+
+#define do_vsrar(E, T) \
+static T do_vsrar_ ##E(T s1, int sh) \
+{ \
+ if (sh == 0) { \
+ return s1; \
+ } else { \
+ return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
+ } \
+}
+
+do_vsrar(B, int8_t)
+do_vsrar(H, int16_t)
+do_vsrar(W, int32_t)
+do_vsrar(D, int64_t)
+
+#define VSRAR(NAME, BIT, T, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = do_vsrar_ ## E(Vj->E(i), ((T)Vk->E(i))%BIT); \
+ } \
+}
+
+VSRAR(vsrar_b, 8, uint8_t, B)
+VSRAR(vsrar_h, 16, uint16_t, H)
+VSRAR(vsrar_w, 32, uint32_t, W)
+VSRAR(vsrar_d, 64, uint64_t, D)
+
+#define VSRARI(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = do_vsrar_ ## E(Vj->E(i), imm); \
+ } \
+}
+
+VSRARI(vsrari_b, 8, B)
+VSRARI(vsrari_h, 16, H)
+VSRARI(vsrari_w, 32, W)
+VSRARI(vsrari_d, 64, D)
+
+#define R_SHIFT(a, b) (a >> b)
+
+#define VSRLN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = R_SHIFT((T)Vj->E2(i),((T)Vk->E2(i)) % BIT); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSRLN(vsrln_b_h, 16, uint16_t, B, H)
+VSRLN(vsrln_h_w, 32, uint32_t, H, W)
+VSRLN(vsrln_w_d, 64, uint64_t, W, D)
+
+#define VSRAN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = R_SHIFT(Vj->E2(i), ((T)Vk->E2(i)) % BIT); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSRAN(vsran_b_h, 16, uint16_t, B, H)
+VSRAN(vsran_h_w, 32, uint32_t, H, W)
+VSRAN(vsran_w_d, 64, uint64_t, W, D)
+
+#define VSRLNI(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i, max; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ temp.D(0) = 0; \
+ temp.D(1) = 0; \
+ max = LSX_LEN/BIT; \
+ for (i = 0; i < max; i++) { \
+ temp.E1(i) = R_SHIFT((T)Vj->E2(i), imm); \
+ temp.E1(i + max) = R_SHIFT((T)Vd->E2(i), imm); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vsrlni_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp.D(0) = 0;
+ temp.D(1) = 0;
+ temp.D(0) = int128_getlo(int128_urshift(Vj->Q(0), imm % 128));
+ temp.D(1) = int128_getlo(int128_urshift(Vd->Q(0), imm % 128));
+ *Vd = temp;
+}
+
+VSRLNI(vsrlni_b_h, 16, uint16_t, B, H)
+VSRLNI(vsrlni_h_w, 32, uint32_t, H, W)
+VSRLNI(vsrlni_w_d, 64, uint64_t, W, D)
+
+#define VSRANI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i, max; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ temp.D(0) = 0; \
+ temp.D(1) = 0; \
+ max = LSX_LEN/BIT; \
+ for (i = 0; i < max; i++) { \
+ temp.E1(i) = R_SHIFT(Vj->E2(i), imm); \
+ temp.E1(i + max) = R_SHIFT(Vd->E2(i), imm); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vsrani_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp.D(0) = 0;
+ temp.D(1) = 0;
+ temp.D(0) = int128_getlo(int128_rshift(Vj->Q(0), imm % 128));
+ temp.D(1) = int128_getlo(int128_rshift(Vd->Q(0), imm % 128));
+ *Vd = temp;
+}
+
+VSRANI(vsrani_b_h, 16, B, H)
+VSRANI(vsrani_h_w, 32, H, W)
+VSRANI(vsrani_w_d, 64, W, D)
+
+#define VSRLRN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_vsrlr_ ## E2(Vj->E2(i), ((T)Vk->E2(i))%BIT); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSRLRN(vsrlrn_b_h, 16, uint16_t, B, H)
+VSRLRN(vsrlrn_h_w, 32, uint32_t, H, W)
+VSRLRN(vsrlrn_w_d, 64, uint64_t, W, D)
+
+#define VSRARN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_vsrar_ ## E2(Vj->E2(i), ((T)Vk->E2(i))%BIT); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSRARN(vsrarn_b_h, 16, uint8_t, B, H)
+VSRARN(vsrarn_h_w, 32, uint16_t, H, W)
+VSRARN(vsrarn_w_d, 64, uint32_t, W, D)
+
+#define VSRLRNI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i, max; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ temp.D(0) = 0; \
+ temp.D(1) = 0; \
+ max = LSX_LEN/BIT; \
+ for (i = 0; i < max; i++) { \
+ temp.E1(i) = do_vsrlr_ ## E2(Vj->E2(i), imm); \
+ temp.E1(i + max) = do_vsrlr_ ## E2(Vd->E2(i), imm); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vsrlrni_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ Int128 r1, r2;
+
+ if (imm == 0) {
+ temp.D(0) = int128_getlo(Vj->Q(0));
+ temp.D(1) = int128_getlo(Vd->Q(0));
+ } else {
+ r1 = int128_and(int128_urshift(Vj->Q(0), (imm -1)), int128_one());
+ r2 = int128_and(int128_urshift(Vd->Q(0), (imm -1)), int128_one());
+
+ temp.D(0) = int128_getlo(int128_add(int128_urshift(Vj->Q(0), imm), r1));
+ temp.D(1) = int128_getlo(int128_add(int128_urshift(Vd->Q(0), imm), r2));
+ }
+ *Vd = temp;
+}
+
+VSRLRNI(vsrlrni_b_h, 16, B, H)
+VSRLRNI(vsrlrni_h_w, 32, H, W)
+VSRLRNI(vsrlrni_w_d, 64, W, D)
+
+#define VSRARNI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i, max; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ temp.D(0) = 0; \
+ temp.D(1) = 0; \
+ max = LSX_LEN/BIT; \
+ for (i = 0; i < max; i++) { \
+ temp.E1(i) = do_vsrar_ ## E2(Vj->E2(i), imm); \
+ temp.E1(i + max) = do_vsrar_ ## E2(Vd->E2(i), imm); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vsrarni_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ Int128 r1, r2;
+
+ if (imm == 0) {
+ temp.D(0) = int128_getlo(Vj->Q(0));
+ temp.D(1) = int128_getlo(Vd->Q(0));
+ } else {
+ r1 = int128_and(int128_rshift(Vj->Q(0), (imm -1)), int128_one());
+ r2 = int128_and(int128_rshift(Vd->Q(0), (imm -1)), int128_one());
+
+ temp.D(0) = int128_getlo(int128_add(int128_rshift(Vj->Q(0), imm), r1));
+ temp.D(1) = int128_getlo(int128_add(int128_rshift(Vd->Q(0), imm), r2));
+ }
+ *Vd = temp;
+}
+
+VSRARNI(vsrarni_b_h, 16, B, H)
+VSRARNI(vsrarni_h_w, 32, H, W)
+VSRARNI(vsrarni_w_d, 64, W, D)
+
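+/*
+ * Saturating narrowing shifts: the do_ssrlns/do_ssrans family shifts an
+ * element right and clamps the result to the range of the narrower
+ * destination type before it is written back.
+ */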
+#define SSRLNS(NAME, T1, T2, T3) \
+static T1 do_ssrlns_ ## NAME(T2 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ if (sa == 0) { \
+ shft_res = e2; \
+ } else { \
+ shft_res = (((T1)e2) >> sa); \
+ } \
+ T3 mask; \
+ mask = (1ull << sh) -1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRLNS(B, uint16_t, int16_t, uint8_t)
+SSRLNS(H, uint32_t, int32_t, uint16_t)
+SSRLNS(W, uint64_t, int64_t, uint32_t)
+
+#define VSSRLN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrlns_ ## E1(Vj->E2(i), (T)Vk->E2(i)% BIT, BIT/2 -1); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRLN(vssrln_b_h, 16, uint16_t, B, H)
+VSSRLN(vssrln_h_w, 32, uint32_t, H, W)
+VSSRLN(vssrln_w_d, 64, uint64_t, W, D)
+
+#define SSRANS(E, T1, T2) \
+static T1 do_ssrans_ ## E(T1 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ if (sa == 0) { \
+ shft_res = e2; \
+ } else { \
+ shft_res = e2 >> sa; \
+ } \
+ T2 mask; \
+ mask = (1ll << sh) -1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else if (shft_res < -(mask +1)) { \
+ return ~mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRANS(B, int16_t, int8_t)
+SSRANS(H, int32_t, int16_t)
+SSRANS(W, int64_t, int32_t)
+
+#define VSSRAN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrans_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRAN(vssran_b_h, 16, uint16_t, B, H)
+VSSRAN(vssran_h_w, 32, uint32_t, H, W)
+VSSRAN(vssran_w_d, 64, uint64_t, W, D)
+
+#define SSRLNU(E, T1, T2, T3) \
+static T1 do_ssrlnu_ ## E(T3 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ if (sa == 0) { \
+ shft_res = e2; \
+ } else { \
+ shft_res = (((T1)e2) >> sa); \
+ } \
+ T2 mask; \
+ mask = (1ull << sh) -1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRLNU(B, uint16_t, uint8_t, int16_t)
+SSRLNU(H, uint32_t, uint16_t, int32_t)
+SSRLNU(W, uint64_t, uint32_t, int64_t)
+
+#define VSSRLNU(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrlnu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRLNU(vssrln_bu_h, 16, uint16_t, B, H)
+VSSRLNU(vssrln_hu_w, 32, uint32_t, H, W)
+VSSRLNU(vssrln_wu_d, 64, uint64_t, W, D)
+
+#define SSRANU(E, T1, T2, T3) \
+static T1 do_ssranu_ ## E(T3 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ if (sa == 0) { \
+ shft_res = e2; \
+ } else { \
+ shft_res = e2 >> sa; \
+ } \
+ if (e2 < 0) { \
+ shft_res = 0; \
+ } \
+ T2 mask; \
+ mask = (1ull << sh) -1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRANU(B, uint16_t, uint8_t, int16_t)
+SSRANU(H, uint32_t, uint16_t, int32_t)
+SSRANU(W, uint64_t, uint32_t, int64_t)
+
+#define VSSRANU(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssranu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRANU(vssran_bu_h, 16, uint16_t, B, H)
+VSSRANU(vssran_hu_w, 32, uint32_t, H, W)
+VSSRANU(vssran_wu_d, 64, uint64_t, W, D)
+
+#define VSSRLNI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrlns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrlns_ ## E1(Vd->E2(i), imm, BIT/2 -1);\
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vssrlni_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ Int128 shft_res1, shft_res2, mask;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ if (imm == 0) {
+ shft_res1 = Vj->Q(0);
+ shft_res2 = Vd->Q(0);
+ } else {
+ shft_res1 = int128_urshift(Vj->Q(0), imm);
+ shft_res2 = int128_urshift(Vd->Q(0), imm);
+ }
+ mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());
+
+ if (int128_ult(mask, shft_res1)) {
+ Vd->D(0) = int128_getlo(mask);
+    } else {
+ Vd->D(0) = int128_getlo(shft_res1);
+ }
+
+ if (int128_ult(mask, shft_res2)) {
+ Vd->D(1) = int128_getlo(mask);
+    } else {
+ Vd->D(1) = int128_getlo(shft_res2);
+ }
+}
+
+VSSRLNI(vssrlni_b_h, 16, B, H)
+VSSRLNI(vssrlni_h_w, 32, H, W)
+VSSRLNI(vssrlni_w_d, 64, W, D)
+
+#define VSSRANI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrans_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrans_ ## E1(Vd->E2(i), imm, BIT/2 -1); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vssrani_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ Int128 shft_res1, shft_res2, mask, min;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ if (imm == 0) {
+ shft_res1 = Vj->Q(0);
+ shft_res2 = Vd->Q(0);
+ } else {
+ shft_res1 = int128_rshift(Vj->Q(0), imm);
+ shft_res2 = int128_rshift(Vd->Q(0), imm);
+ }
+ mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());
+ min = int128_lshift(int128_one(), 63);
+
+ if (int128_gt(shft_res1, mask)) {
+ Vd->D(0) = int128_getlo(mask);
+ } else if (int128_lt(shft_res1, int128_neg(min))) {
+ Vd->D(0) = int128_getlo(min);
+ } else {
+ Vd->D(0) = int128_getlo(shft_res1);
+ }
+
+ if (int128_gt(shft_res2, mask)) {
+ Vd->D(1) = int128_getlo(mask);
+ } else if (int128_lt(shft_res2, int128_neg(min))) {
+ Vd->D(1) = int128_getlo(min);
+ } else {
+ Vd->D(1) = int128_getlo(shft_res2);
+ }
+}
+
+VSSRANI(vssrani_b_h, 16, B, H)
+VSSRANI(vssrani_h_w, 32, H, W)
+VSSRANI(vssrani_w_d, 64, W, D)
+
+#define VSSRLNUI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrlnu_ ## E1(Vj->E2(i), imm, BIT/2); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrlnu_ ## E1(Vd->E2(i), imm, BIT/2); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vssrlni_du_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ Int128 shft_res1, shft_res2, mask;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ if (imm == 0) {
+ shft_res1 = Vj->Q(0);
+ shft_res2 = Vd->Q(0);
+ } else {
+ shft_res1 = int128_urshift(Vj->Q(0), imm);
+ shft_res2 = int128_urshift(Vd->Q(0), imm);
+ }
+ mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());
+
+ if (int128_ult(mask, shft_res1)) {
+ Vd->D(0) = int128_getlo(mask);
+    } else {
+ Vd->D(0) = int128_getlo(shft_res1);
+ }
+
+ if (int128_ult(mask, shft_res2)) {
+ Vd->D(1) = int128_getlo(mask);
+    } else {
+ Vd->D(1) = int128_getlo(shft_res2);
+ }
+}
+
+VSSRLNUI(vssrlni_bu_h, 16, B, H)
+VSSRLNUI(vssrlni_hu_w, 32, H, W)
+VSSRLNUI(vssrlni_wu_d, 64, W, D)
+
+#define VSSRANUI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssranu_ ## E1(Vj->E2(i), imm, BIT/2); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssranu_ ## E1(Vd->E2(i), imm, BIT/2); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vssrani_du_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ Int128 shft_res1, shft_res2, mask;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ if (imm == 0) {
+ shft_res1 = Vj->Q(0);
+ shft_res2 = Vd->Q(0);
+ } else {
+ shft_res1 = int128_rshift(Vj->Q(0), imm);
+ shft_res2 = int128_rshift(Vd->Q(0), imm);
+ }
+
+ if (int128_lt(Vj->Q(0), int128_zero())) {
+ shft_res1 = int128_zero();
+ }
+
+ if (int128_lt(Vd->Q(0), int128_zero())) {
+ shft_res2 = int128_zero();
+ }
+
+ mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());
+
+ if (int128_ult(mask, shft_res1)) {
+ Vd->D(0) = int128_getlo(mask);
+    } else {
+ Vd->D(0) = int128_getlo(shft_res1);
+ }
+
+ if (int128_ult(mask, shft_res2)) {
+ Vd->D(1) = int128_getlo(mask);
+    } else {
+ Vd->D(1) = int128_getlo(shft_res2);
+ }
+}
+
+VSSRANUI(vssrani_bu_h, 16, B, H)
+VSSRANUI(vssrani_hu_w, 32, H, W)
+VSSRANUI(vssrani_wu_d, 64, W, D)
+
+#define SSRLRNS(E1, E2, T1, T2, T3) \
+static T1 do_ssrlrns_ ## E1(T2 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ \
+ shft_res = do_vsrlr_ ## E2(e2, sa); \
+ T1 mask; \
+ mask = (1ull << sh) -1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRLRNS(B, H, uint16_t, int16_t, uint8_t)
+SSRLRNS(H, W, uint32_t, int32_t, uint16_t)
+SSRLRNS(W, D, uint64_t, int64_t, uint32_t)
+
+#define VSSRLRN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrlrns_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRLRN(vssrlrn_b_h, 16, uint16_t, B, H)
+VSSRLRN(vssrlrn_h_w, 32, uint32_t, H, W)
+VSSRLRN(vssrlrn_w_d, 64, uint64_t, W, D)
+
+#define SSRARNS(E1, E2, T1, T2) \
+static T1 do_ssrarns_ ## E1(T1 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ \
+ shft_res = do_vsrar_ ## E2(e2, sa); \
+ T2 mask; \
+ mask = (1ll << sh) -1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else if (shft_res < -(mask +1)) { \
+ return ~mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRARNS(B, H, int16_t, int8_t)
+SSRARNS(H, W, int32_t, int16_t)
+SSRARNS(W, D, int64_t, int32_t)
+
+#define VSSRARN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrarns_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRARN(vssrarn_b_h, 16, uint16_t, B, H)
+VSSRARN(vssrarn_h_w, 32, uint32_t, H, W)
+VSSRARN(vssrarn_w_d, 64, uint64_t, W, D)
+
+#define SSRLRNU(E1, E2, T1, T2, T3) \
+static T1 do_ssrlrnu_ ## E1(T3 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ \
+ shft_res = do_vsrlr_ ## E2(e2, sa); \
+ \
+ T2 mask; \
+ mask = (1ull << sh) -1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRLRNU(B, H, uint16_t, uint8_t, int16_t)
+SSRLRNU(H, W, uint32_t, uint16_t, int32_t)
+SSRLRNU(W, D, uint64_t, uint32_t, int64_t)
+
+#define VSSRLRNU(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrlrnu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRLRNU(vssrlrn_bu_h, 16, uint16_t, B, H)
+VSSRLRNU(vssrlrn_hu_w, 32, uint32_t, H, W)
+VSSRLRNU(vssrlrn_wu_d, 64, uint64_t, W, D)
+
+#define SSRARNU(E1, E2, T1, T2, T3) \
+static T1 do_ssrarnu_ ## E1(T3 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ \
+ if (e2 < 0) { \
+ shft_res = 0; \
+ } else { \
+ shft_res = do_vsrar_ ## E2(e2, sa); \
+ } \
+ T2 mask; \
+ mask = (1ull << sh) -1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRARNU(B, H, uint16_t, uint8_t, int16_t)
+SSRARNU(H, W, uint32_t, uint16_t, int32_t)
+SSRARNU(W, D, uint64_t, uint32_t, int64_t)
+
+#define VSSRARNU(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrarnu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRARNU(vssrarn_bu_h, 16, uint16_t, B, H)
+VSSRARNU(vssrarn_hu_w, 32, uint32_t, H, W)
+VSSRARNU(vssrarn_wu_d, 64, uint64_t, W, D)
+
+#define VSSRLRNI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrlrns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrlrns_ ## E1(Vd->E2(i), imm, BIT/2 -1);\
+ } \
+ *Vd = temp; \
+}
+
+#define VSSRLRNI_Q(NAME, sh) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ Int128 shft_res1, shft_res2, mask, r1, r2; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ if (imm == 0) { \
+ shft_res1 = Vj->Q(0); \
+ shft_res2 = Vd->Q(0); \
+ } else { \
+ r1 = int128_and(int128_urshift(Vj->Q(0), (imm -1)), int128_one()); \
+ r2 = int128_and(int128_urshift(Vd->Q(0), (imm -1)), int128_one()); \
+ \
+ shft_res1 = (int128_add(int128_urshift(Vj->Q(0), imm), r1)); \
+ shft_res2 = (int128_add(int128_urshift(Vd->Q(0), imm), r2)); \
+ } \
+ \
+ mask = int128_sub(int128_lshift(int128_one(), sh), int128_one()); \
+ \
+ if (int128_ult(mask, shft_res1)) { \
+ Vd->D(0) = int128_getlo(mask); \
+    } else {                                                           \
+ Vd->D(0) = int128_getlo(shft_res1); \
+ } \
+ \
+ if (int128_ult(mask, shft_res2)) { \
+ Vd->D(1) = int128_getlo(mask); \
+    } else {                                                           \
+ Vd->D(1) = int128_getlo(shft_res2); \
+ } \
+}
+
+VSSRLRNI(vssrlrni_b_h, 16, B, H)
+VSSRLRNI(vssrlrni_h_w, 32, H, W)
+VSSRLRNI(vssrlrni_w_d, 64, W, D)
+VSSRLRNI_Q(vssrlrni_d_q, 63)
+
+#define VSSRARNI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrarns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrarns_ ## E1(Vd->E2(i), imm, BIT/2 -1); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vssrarni_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ Int128 shft_res1, shft_res2, mask1, mask2, r1, r2;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ if (imm == 0) {
+ shft_res1 = Vj->Q(0);
+ shft_res2 = Vd->Q(0);
+ } else {
+ r1 = int128_and(int128_rshift(Vj->Q(0), (imm -1)), int128_one());
+ r2 = int128_and(int128_rshift(Vd->Q(0), (imm -1)), int128_one());
+
+ shft_res1 = int128_add(int128_rshift(Vj->Q(0), imm), r1);
+ shft_res2 = int128_add(int128_rshift(Vd->Q(0), imm), r2);
+ }
+
+ mask1 = int128_sub(int128_lshift(int128_one(), 63), int128_one());
+ mask2 = int128_lshift(int128_one(), 63);
+
+ if (int128_gt(shft_res1, mask1)) {
+ Vd->D(0) = int128_getlo(mask1);
+ } else if (int128_lt(shft_res1, int128_neg(mask2))) {
+ Vd->D(0) = int128_getlo(mask2);
+ } else {
+ Vd->D(0) = int128_getlo(shft_res1);
+ }
+
+ if (int128_gt(shft_res2, mask1)) {
+ Vd->D(1) = int128_getlo(mask1);
+ } else if (int128_lt(shft_res2, int128_neg(mask2))) {
+ Vd->D(1) = int128_getlo(mask2);
+ } else {
+ Vd->D(1) = int128_getlo(shft_res2);
+ }
+}
+
+VSSRARNI(vssrarni_b_h, 16, B, H)
+VSSRARNI(vssrarni_h_w, 32, H, W)
+VSSRARNI(vssrarni_w_d, 64, W, D)
+
+#define VSSRLRNUI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrlrnu_ ## E1(Vj->E2(i), imm, BIT/2); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrlrnu_ ## E1(Vd->E2(i), imm, BIT/2); \
+ } \
+ *Vd = temp; \
+}
+
+VSSRLRNUI(vssrlrni_bu_h, 16, B, H)
+VSSRLRNUI(vssrlrni_hu_w, 32, H, W)
+VSSRLRNUI(vssrlrni_wu_d, 64, W, D)
+VSSRLRNI_Q(vssrlrni_du_q, 64)
+
+#define VSSRARNUI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrarnu_ ## E1(Vj->E2(i), imm, BIT/2); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrarnu_ ## E1(Vd->E2(i), imm, BIT/2); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vssrarni_du_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ Int128 shft_res1, shft_res2, mask1, mask2, r1, r2;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ if (imm == 0) {
+ shft_res1 = Vj->Q(0);
+ shft_res2 = Vd->Q(0);
+ } else {
+ r1 = int128_and(int128_rshift(Vj->Q(0), (imm -1)), int128_one());
+ r2 = int128_and(int128_rshift(Vd->Q(0), (imm -1)), int128_one());
+
+ shft_res1 = int128_add(int128_rshift(Vj->Q(0), imm), r1);
+ shft_res2 = int128_add(int128_rshift(Vd->Q(0), imm), r2);
+ }
+
+ if (int128_lt(Vj->Q(0), int128_zero())) {
+ shft_res1 = int128_zero();
+ }
+ if (int128_lt(Vd->Q(0), int128_zero())) {
+ shft_res2 = int128_zero();
+ }
+
+ mask1 = int128_sub(int128_lshift(int128_one(), 64), int128_one());
+ mask2 = int128_lshift(int128_one(), 64);
+
+ if (int128_gt(shft_res1, mask1)) {
+ Vd->D(0) = int128_getlo(mask1);
+ } else if (int128_lt(shft_res1, int128_neg(mask2))) {
+ Vd->D(0) = int128_getlo(mask2);
+ } else {
+ Vd->D(0) = int128_getlo(shft_res1);
+ }
+
+ if (int128_gt(shft_res2, mask1)) {
+ Vd->D(1) = int128_getlo(mask1);
+ } else if (int128_lt(shft_res2, int128_neg(mask2))) {
+ Vd->D(1) = int128_getlo(mask2);
+ } else {
+ Vd->D(1) = int128_getlo(shft_res2);
+ }
+}
+
+VSSRARNUI(vssrarni_bu_h, 16, B, H)
+VSSRARNUI(vssrarni_hu_w, 32, H, W)
+VSSRARNUI(vssrarni_wu_d, 64, W, D)
+
+#define DO_2OP(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) \
+ { \
+ Vd->E(i) = DO_OP(Vj->E(i)); \
+ } \
+}
+
+#define DO_CLO_B(N) (clz32(~N & 0xff) - 24)
+#define DO_CLO_H(N) (clz32(~N & 0xffff) - 16)
+#define DO_CLO_W(N) (clz32(~N))
+#define DO_CLO_D(N) (clz64(~N))
+#define DO_CLZ_B(N) (clz32(N) - 24)
+#define DO_CLZ_H(N) (clz32(N) - 16)
+#define DO_CLZ_W(N) (clz32(N))
+#define DO_CLZ_D(N) (clz64(N))
+
+DO_2OP(vclo_b, 8, UB, DO_CLO_B)
+DO_2OP(vclo_h, 16, UH, DO_CLO_H)
+DO_2OP(vclo_w, 32, UW, DO_CLO_W)
+DO_2OP(vclo_d, 64, UD, DO_CLO_D)
+DO_2OP(vclz_b, 8, UB, DO_CLZ_B)
+DO_2OP(vclz_h, 16, UH, DO_CLZ_H)
+DO_2OP(vclz_w, 32, UW, DO_CLZ_W)
+DO_2OP(vclz_d, 64, UD, DO_CLZ_D)
+
+#define VPCNT(NAME, BIT, E, FN) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) \
+ { \
+ Vd->E(i) = FN(Vj->E(i)); \
+ } \
+}
+
+VPCNT(vpcnt_b, 8, UB, ctpop8)
+VPCNT(vpcnt_h, 16, UH, ctpop16)
+VPCNT(vpcnt_w, 32, UW, ctpop32)
+VPCNT(vpcnt_d, 64, UD, ctpop64)
+
+#define DO_BITCLR(a, bit) (a & ~(1ull << bit))
+#define DO_BITSET(a, bit) (a | 1ull << bit)
+#define DO_BITREV(a, bit) (a ^ (1ull << bit))
+
+#define DO_BIT(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i)%BIT); \
+ } \
+}
+
+DO_BIT(vbitclr_b, 8, UB, DO_BITCLR)
+DO_BIT(vbitclr_h, 16, UH, DO_BITCLR)
+DO_BIT(vbitclr_w, 32, UW, DO_BITCLR)
+DO_BIT(vbitclr_d, 64, UD, DO_BITCLR)
+DO_BIT(vbitset_b, 8, UB, DO_BITSET)
+DO_BIT(vbitset_h, 16, UH, DO_BITSET)
+DO_BIT(vbitset_w, 32, UW, DO_BITSET)
+DO_BIT(vbitset_d, 64, UD, DO_BITSET)
+DO_BIT(vbitrev_b, 8, UB, DO_BITREV)
+DO_BIT(vbitrev_h, 16, UH, DO_BITREV)
+DO_BIT(vbitrev_w, 32, UW, DO_BITREV)
+DO_BIT(vbitrev_d, 64, UD, DO_BITREV)
+
+#define DO_BITI(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i), imm); \
+ } \
+}
+
+DO_BITI(vbitclri_b, 8, UB, DO_BITCLR)
+DO_BITI(vbitclri_h, 16, UH, DO_BITCLR)
+DO_BITI(vbitclri_w, 32, UW, DO_BITCLR)
+DO_BITI(vbitclri_d, 64, UD, DO_BITCLR)
+DO_BITI(vbitseti_b, 8, UB, DO_BITSET)
+DO_BITI(vbitseti_h, 16, UH, DO_BITSET)
+DO_BITI(vbitseti_w, 32, UW, DO_BITSET)
+DO_BITI(vbitseti_d, 64, UD, DO_BITSET)
+DO_BITI(vbitrevi_b, 8, UB, DO_BITREV)
+DO_BITI(vbitrevi_h, 16, UH, DO_BITREV)
+DO_BITI(vbitrevi_w, 32, UW, DO_BITREV)
+DO_BITI(vbitrevi_d, 64, UD, DO_BITREV)
+
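+/*
+ * VFRSTP/VFRSTPI: scan Vj for the first negative element and store its
+ * index (or the element count if none is found) into the destination
+ * element selected by Vk or by the immediate.
+ */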
+#define VFRSTP(NAME, BIT, MASK, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i, m; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ if (Vj->E(i) < 0) { \
+ break; \
+ } \
+ } \
+ m = Vk->E(0) & MASK; \
+ Vd->E(m) = i; \
+}
+
+VFRSTP(vfrstp_b, 8, 0xf, B)
+VFRSTP(vfrstp_h, 16, 0x7, H)
+
+#define VFRSTPI(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i, m; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ if (Vj->E(i) < 0) { \
+ break; \
+ } \
+ } \
+ m = imm % (LSX_LEN/BIT); \
+ Vd->E(m) = i; \
+}
+
+VFRSTPI(vfrstpi_b, 8, B)
+VFRSTPI(vfrstpi_h, 16, H)
+
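+/*
+ * Fold the accumulated softfloat exception flags into FCSR0: masked-out
+ * flags are dropped, the cause bits are updated, and an enabled exception
+ * raises EXCCODE_FPE at the recorded return address.
+ */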
+static void vec_update_fcsr0_mask(CPULoongArchState *env,
+ uintptr_t pc, int mask)
+{
+ int flags = get_float_exception_flags(&env->fp_status);
+
+ set_float_exception_flags(0, &env->fp_status);
+
+ flags &= ~mask;
+
+ if (flags) {
+ flags = ieee_ex_to_loongarch(flags);
+ UPDATE_FP_CAUSE(env->fcsr0, flags);
+ }
+
+ if (GET_FP_ENABLES(env->fcsr0) & flags) {
+ do_raise_exception(env, EXCCODE_FPE, pc);
+ } else {
+ UPDATE_FP_FLAGS(env->fcsr0, flags);
+ }
+}
+
+static void vec_update_fcsr0(CPULoongArchState *env, uintptr_t pc)
+{
+ vec_update_fcsr0_mask(env, pc, 0);
+}
+
+static inline void vec_clear_cause(CPULoongArchState *env)
+{
+ SET_FP_CAUSE(env->fcsr0, 0);
+}
+
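+/*
+ * Per-element float helpers: vec_update_fcsr0() runs after every lane so
+ * an enabled exception is raised as soon as the offending lane is
+ * processed.
+ */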
+#define DO_3OP_F(NAME, BIT, E, FN) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = FN(Vj->E(i), Vk->E(i), &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ } \
+}
+
+DO_3OP_F(vfadd_s, 32, UW, float32_add)
+DO_3OP_F(vfadd_d, 64, UD, float64_add)
+DO_3OP_F(vfsub_s, 32, UW, float32_sub)
+DO_3OP_F(vfsub_d, 64, UD, float64_sub)
+DO_3OP_F(vfmul_s, 32, UW, float32_mul)
+DO_3OP_F(vfmul_d, 64, UD, float64_mul)
+DO_3OP_F(vfdiv_s, 32, UW, float32_div)
+DO_3OP_F(vfdiv_d, 64, UD, float64_div)
+DO_3OP_F(vfmax_s, 32, UW, float32_maxnum)
+DO_3OP_F(vfmax_d, 64, UD, float64_maxnum)
+DO_3OP_F(vfmin_s, 32, UW, float32_minnum)
+DO_3OP_F(vfmin_d, 64, UD, float64_minnum)
+DO_3OP_F(vfmaxa_s, 32, UW, float32_maxnummag)
+DO_3OP_F(vfmaxa_d, 64, UD, float64_maxnummag)
+DO_3OP_F(vfmina_s, 32, UW, float32_minnummag)
+DO_3OP_F(vfmina_d, 64, UD, float64_minnummag)
+
+#define DO_4OP_F(NAME, BIT, E, FN, flags) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk, uint32_t va) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ VReg *Va = &(env->fpr[va].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = FN(Vj->E(i), Vk->E(i), Va->E(i), flags, &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ } \
+}
+
+DO_4OP_F(vfmadd_s, 32, UW, float32_muladd, 0)
+DO_4OP_F(vfmadd_d, 64, UD, float64_muladd, 0)
+DO_4OP_F(vfmsub_s, 32, UW, float32_muladd, float_muladd_negate_c)
+DO_4OP_F(vfmsub_d, 64, UD, float64_muladd, float_muladd_negate_c)
+DO_4OP_F(vfnmadd_s, 32, UW, float32_muladd, float_muladd_negate_result)
+DO_4OP_F(vfnmadd_d, 64, UD, float64_muladd, float_muladd_negate_result)
+DO_4OP_F(vfnmsub_s, 32, UW, float32_muladd,
+ float_muladd_negate_c | float_muladd_negate_result)
+DO_4OP_F(vfnmsub_d, 64, UD, float64_muladd,
+ float_muladd_negate_c | float_muladd_negate_result)
+
+#define DO_2OP_F(NAME, BIT, E, FN) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = FN(env, Vj->E(i)); \
+ } \
+}
+
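+/*
+ * do_flogb_* computes log2 of the input rounded down to an integral value
+ * by temporarily forcing the round-down rounding mode and discarding the
+ * resulting inexact flag.
+ */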
+#define FLOGB(BIT, T) \
+static T do_flogb_## BIT(CPULoongArchState *env, T fj) \
+{ \
+ T fp, fd; \
+ float_status *status = &env->fp_status; \
+ FloatRoundMode old_mode = get_float_rounding_mode(status); \
+ \
+ set_float_rounding_mode(float_round_down, status); \
+ fp = float ## BIT ##_log2(fj, status); \
+ fd = float ## BIT ##_round_to_int(fp, status); \
+ set_float_rounding_mode(old_mode, status); \
+ vec_update_fcsr0_mask(env, GETPC(), float_flag_inexact); \
+ return fd; \
+}
+
+FLOGB(32, uint32_t)
+FLOGB(64, uint64_t)
+
+#define FCLASS(NAME, BIT, E, FN) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = FN(env, Vj->E(i)); \
+ } \
+}
+
+FCLASS(vfclass_s, 32, UW, helper_fclass_s)
+FCLASS(vfclass_d, 64, UD, helper_fclass_d)
+
+#define FSQRT(BIT, T) \
+static T do_fsqrt_## BIT(CPULoongArchState *env, T fj) \
+{ \
+ T fd; \
+ fd = float ## BIT ##_sqrt(fj, &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ return fd; \
+}
+
+FSQRT(32, uint32_t)
+FSQRT(64, uint64_t)
+
+#define FRECIP(BIT, T) \
+static T do_frecip_## BIT(CPULoongArchState *env, T fj) \
+{ \
+ T fd; \
+ fd = float ## BIT ##_div(float ## BIT ##_one, fj, &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ return fd; \
+}
+
+FRECIP(32, uint32_t)
+FRECIP(64, uint64_t)
+
+#define FRSQRT(BIT, T) \
+static T do_frsqrt_## BIT(CPULoongArchState *env, T fj) \
+{ \
+ T fd, fp; \
+ fp = float ## BIT ##_sqrt(fj, &env->fp_status); \
+ fd = float ## BIT ##_div(float ## BIT ##_one, fp, &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ return fd; \
+}
+
+FRSQRT(32, uint32_t)
+FRSQRT(64, uint64_t)
+
+DO_2OP_F(vflogb_s, 32, UW, do_flogb_32)
+DO_2OP_F(vflogb_d, 64, UD, do_flogb_64)
+DO_2OP_F(vfsqrt_s, 32, UW, do_fsqrt_32)
+DO_2OP_F(vfsqrt_d, 64, UD, do_fsqrt_64)
+DO_2OP_F(vfrecip_s, 32, UW, do_frecip_32)
+DO_2OP_F(vfrecip_d, 64, UD, do_frecip_64)
+DO_2OP_F(vfrsqrt_s, 32, UW, do_frsqrt_32)
+DO_2OP_F(vfrsqrt_d, 64, UD, do_frsqrt_64)
+
+static uint32_t float16_cvt_float32(uint16_t h, float_status *status)
+{
+ return float16_to_float32(h, true, status);
+}
+static uint64_t float32_cvt_float64(uint32_t s, float_status *status)
+{
+ return float32_to_float64(s, status);
+}
+
+static uint16_t float32_cvt_float16(uint32_t s, float_status *status)
+{
+ return float32_to_float16(s, true, status);
+}
+static uint32_t float64_cvt_float32(uint64_t d, float_status *status)
+{
+ return float64_to_float32(d, status);
+}
+
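+/*
+ * vfcvtl_*: widen the elements in the low half of Vj; vfcvth_*: widen the
+ * elements in the high half.  A temporary is used since Vd may alias Vj.
+ */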
+void HELPER(vfcvtl_s_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < LSX_LEN/32; i++) {
+ temp.UW(i) = float16_cvt_float32(Vj->UH(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vfcvtl_d_s)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < LSX_LEN/64; i++) {
+ temp.UD(i) = float32_cvt_float64(Vj->UW(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vfcvth_s_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < LSX_LEN/32; i++) {
+ temp.UW(i) = float16_cvt_float32(Vj->UH(i + 4), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vfcvth_d_s)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < LSX_LEN/64; i++) {
+ temp.UD(i) = float32_cvt_float64(Vj->UW(i + 2), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
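+/*
+ * Narrowing conversions: the converted elements of Vj fill the high half
+ * of the destination and those of Vk fill the low half.
+ */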
+void HELPER(vfcvt_h_s)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ vec_clear_cause(env);
+    for (i = 0; i < LSX_LEN/32; i++) {
+ temp.UH(i + 4) = float32_cvt_float16(Vj->UW(i), &env->fp_status);
+ temp.UH(i) = float32_cvt_float16(Vk->UW(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vfcvt_s_d)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ vec_clear_cause(env);
+    for (i = 0; i < LSX_LEN/64; i++) {
+ temp.UW(i + 2) = float64_cvt_float32(Vj->UD(i), &env->fp_status);
+ temp.UW(i) = float64_cvt_float32(Vk->UD(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vfrint_s)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < 4; i++) {
+ Vd->W(i) = float32_round_to_int(Vj->UW(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+}
+
+void HELPER(vfrint_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < 2; i++) {
+ Vd->D(i) = float64_round_to_int(Vj->UD(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+}
+
+#define FCVT_2OP(NAME, BIT, E, MODE) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \
+ set_float_rounding_mode(MODE, &env->fp_status); \
+ Vd->E(i) = float## BIT ## _round_to_int(Vj->E(i), &env->fp_status); \
+ set_float_rounding_mode(old_mode, &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ } \
+}
+
+FCVT_2OP(vfrintrne_s, 32, UW, float_round_nearest_even)
+FCVT_2OP(vfrintrne_d, 64, UD, float_round_nearest_even)
+FCVT_2OP(vfrintrz_s, 32, UW, float_round_to_zero)
+FCVT_2OP(vfrintrz_d, 64, UD, float_round_to_zero)
+FCVT_2OP(vfrintrp_s, 32, UW, float_round_up)
+FCVT_2OP(vfrintrp_d, 64, UD, float_round_up)
+FCVT_2OP(vfrintrm_s, 32, UW, float_round_down)
+FCVT_2OP(vfrintrm_d, 64, UD, float_round_down)
+
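+/*
+ * FTINT wraps a float-to-integer conversion with a fixed rounding mode.
+ * DO_FTINT provides the underlying conversion and forces the result to 0
+ * when the source is a NaN.
+ */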
+#define FTINT(NAME, FMT1, FMT2, T1, T2, MODE) \
+static T2 do_ftint ## NAME(CPULoongArchState *env, T1 fj) \
+{ \
+ T2 fd; \
+ FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \
+ \
+ set_float_rounding_mode(MODE, &env->fp_status); \
+ fd = do_## FMT1 ##_to_## FMT2(env, fj); \
+ set_float_rounding_mode(old_mode, &env->fp_status); \
+ return fd; \
+}
+
+#define DO_FTINT(FMT1, FMT2, T1, T2) \
+static T2 do_## FMT1 ##_to_## FMT2(CPULoongArchState *env, T1 fj) \
+{ \
+ T2 fd; \
+ \
+ fd = FMT1 ##_to_## FMT2(fj, &env->fp_status); \
+ if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { \
+ if (FMT1 ##_is_any_nan(fj)) { \
+ fd = 0; \
+ } \
+ } \
+ vec_update_fcsr0(env, GETPC()); \
+ return fd; \
+}
+
+DO_FTINT(float32, int32, uint32_t, uint32_t)
+DO_FTINT(float64, int64, uint64_t, uint64_t)
+DO_FTINT(float32, uint32, uint32_t, uint32_t)
+DO_FTINT(float64, uint64, uint64_t, uint64_t)
+DO_FTINT(float64, int32, uint64_t, uint32_t)
+DO_FTINT(float32, int64, uint32_t, uint64_t)
+
+FTINT(rne_w_s, float32, int32, uint32_t, uint32_t, float_round_nearest_even)
+FTINT(rne_l_d, float64, int64, uint64_t, uint64_t, float_round_nearest_even)
+FTINT(rp_w_s, float32, int32, uint32_t, uint32_t, float_round_up)
+FTINT(rp_l_d, float64, int64, uint64_t, uint64_t, float_round_up)
+FTINT(rz_w_s, float32, int32, uint32_t, uint32_t, float_round_to_zero)
+FTINT(rz_l_d, float64, int64, uint64_t, uint64_t, float_round_to_zero)
+FTINT(rm_w_s, float32, int32, uint32_t, uint32_t, float_round_down)
+FTINT(rm_l_d, float64, int64, uint64_t, uint64_t, float_round_down)
+
+DO_2OP_F(vftintrne_w_s, 32, UW, do_ftintrne_w_s)
+DO_2OP_F(vftintrne_l_d, 64, UD, do_ftintrne_l_d)
+DO_2OP_F(vftintrp_w_s, 32, UW, do_ftintrp_w_s)
+DO_2OP_F(vftintrp_l_d, 64, UD, do_ftintrp_l_d)
+DO_2OP_F(vftintrz_w_s, 32, UW, do_ftintrz_w_s)
+DO_2OP_F(vftintrz_l_d, 64, UD, do_ftintrz_l_d)
+DO_2OP_F(vftintrm_w_s, 32, UW, do_ftintrm_w_s)
+DO_2OP_F(vftintrm_l_d, 64, UD, do_ftintrm_l_d)
+DO_2OP_F(vftint_w_s, 32, UW, do_float32_to_int32)
+DO_2OP_F(vftint_l_d, 64, UD, do_float64_to_int64)
+
+FTINT(rz_wu_s, float32, uint32, uint32_t, uint32_t, float_round_to_zero)
+FTINT(rz_lu_d, float64, uint64, uint64_t, uint64_t, float_round_to_zero)
+
+DO_2OP_F(vftintrz_wu_s, 32, UW, do_ftintrz_wu_s)
+DO_2OP_F(vftintrz_lu_d, 64, UD, do_ftintrz_lu_d)
+DO_2OP_F(vftint_wu_s, 32, UW, do_float32_to_uint32)
+DO_2OP_F(vftint_lu_d, 64, UD, do_float64_to_uint64)
+
+FTINT(rm_w_d, float64, int32, uint64_t, uint32_t, float_round_down)
+FTINT(rp_w_d, float64, int32, uint64_t, uint32_t, float_round_up)
+FTINT(rz_w_d, float64, int32, uint64_t, uint32_t, float_round_to_zero)
+FTINT(rne_w_d, float64, int32, uint64_t, uint32_t, float_round_nearest_even)
+
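+/*
+ * Narrowing float64->int32: Vj supplies the high two words of the result,
+ * Vk the low two.
+ */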
+#define FTINT_W_D(NAME, FN) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < 2; i++) { \
+ temp.W(i + 2) = FN(env, Vj->UD(i)); \
+ temp.W(i) = FN(env, Vk->UD(i)); \
+ } \
+ *Vd = temp; \
+}
+
+FTINT_W_D(vftint_w_d, do_float64_to_int32)
+FTINT_W_D(vftintrm_w_d, do_ftintrm_w_d)
+FTINT_W_D(vftintrp_w_d, do_ftintrp_w_d)
+FTINT_W_D(vftintrz_w_d, do_ftintrz_w_d)
+FTINT_W_D(vftintrne_w_d, do_ftintrne_w_d)
+
+FTINT(rml_l_s, float32, int64, uint32_t, uint64_t, float_round_down)
+FTINT(rpl_l_s, float32, int64, uint32_t, uint64_t, float_round_up)
+FTINT(rzl_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero)
+FTINT(rnel_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even)
+FTINT(rmh_l_s, float32, int64, uint32_t, uint64_t, float_round_down)
+FTINT(rph_l_s, float32, int64, uint32_t, uint64_t, float_round_up)
+FTINT(rzh_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero)
+FTINT(rneh_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even)
+
+#define FTINTL_L_S(NAME, FN) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < 2; i++) { \
+ temp.D(i) = FN(env, Vj->UW(i)); \
+ } \
+ *Vd = temp; \
+}
+
+FTINTL_L_S(vftintl_l_s, do_float32_to_int64)
+FTINTL_L_S(vftintrml_l_s, do_ftintrml_l_s)
+FTINTL_L_S(vftintrpl_l_s, do_ftintrpl_l_s)
+FTINTL_L_S(vftintrzl_l_s, do_ftintrzl_l_s)
+FTINTL_L_S(vftintrnel_l_s, do_ftintrnel_l_s)
+
+#define FTINTH_L_S(NAME, FN) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < 2; i++) { \
+ temp.D(i) = FN(env, Vj->UW(i + 2)); \
+ } \
+ *Vd = temp; \
+}
+
+FTINTH_L_S(vftinth_l_s, do_float32_to_int64)
+FTINTH_L_S(vftintrmh_l_s, do_ftintrmh_l_s)
+FTINTH_L_S(vftintrph_l_s, do_ftintrph_l_s)
+FTINTH_L_S(vftintrzh_l_s, do_ftintrzh_l_s)
+FTINTH_L_S(vftintrneh_l_s, do_ftintrneh_l_s)
+
+#define FFINT(NAME, FMT1, FMT2, T1, T2) \
+static T2 do_ffint_ ## NAME(CPULoongArchState *env, T1 fj) \
+{ \
+ T2 fd; \
+ \
+ fd = FMT1 ##_to_## FMT2(fj, &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ return fd; \
+}
+
+FFINT(s_w, int32, float32, int32_t, uint32_t)
+FFINT(d_l, int64, float64, int64_t, uint64_t)
+FFINT(s_wu, uint32, float32, uint32_t, uint32_t)
+FFINT(d_lu, uint64, float64, uint64_t, uint64_t)
+
+DO_2OP_F(vffint_s_w, 32, W, do_ffint_s_w)
+DO_2OP_F(vffint_d_l, 64, D, do_ffint_d_l)
+DO_2OP_F(vffint_s_wu, 32, UW, do_ffint_s_wu)
+DO_2OP_F(vffint_d_lu, 64, UD, do_ffint_d_lu)
+
+void HELPER(vffintl_d_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < 2; i++) {
+ temp.D(i) = int32_to_float64(Vj->W(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vffinth_d_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < 2; i++) {
+ temp.D(i) = int32_to_float64(Vj->W(i + 2), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vffint_s_l)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < 2; i++) {
+ temp.W(i + 2) = int64_to_float32(Vj->D(i), &env->fp_status);
+ temp.W(i) = int64_to_float32(Vk->D(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
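+/*
+ * Integer compare with immediate: each result element is all ones when the
+ * comparison holds and zero otherwise; the immediate is converted to the
+ * element type by the TD cast.
+ */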
+#define VSEQ(a, b) (a == b ? -1 : 0)
+#define VSLE(a, b) (a <= b ? -1 : 0)
+#define VSLT(a, b) (a < b ? -1 : 0)
+
+#define VCMPI(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ typedef __typeof(Vd->E(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i), (TD)imm); \
+ } \
+}
+
+VCMPI(vseqi_b, 8, B, VSEQ)
+VCMPI(vseqi_h, 16, H, VSEQ)
+VCMPI(vseqi_w, 32, W, VSEQ)
+VCMPI(vseqi_d, 64, D, VSEQ)
+VCMPI(vslei_b, 8, B, VSLE)
+VCMPI(vslei_h, 16, H, VSLE)
+VCMPI(vslei_w, 32, W, VSLE)
+VCMPI(vslei_d, 64, D, VSLE)
+VCMPI(vslei_bu, 8, UB, VSLE)
+VCMPI(vslei_hu, 16, UH, VSLE)
+VCMPI(vslei_wu, 32, UW, VSLE)
+VCMPI(vslei_du, 64, UD, VSLE)
+VCMPI(vslti_b, 8, B, VSLT)
+VCMPI(vslti_h, 16, H, VSLT)
+VCMPI(vslti_w, 32, W, VSLT)
+VCMPI(vslti_d, 64, D, VSLT)
+VCMPI(vslti_bu, 8, UB, VSLT)
+VCMPI(vslti_hu, 16, UH, VSLT)
+VCMPI(vslti_wu, 32, UW, VSLT)
+VCMPI(vslti_du, 64, UD, VSLT)
+
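+/*
+ * Map a softfloat comparison result onto the FCMP_{LT,EQ,GT,UN} condition
+ * mask selected by "flags"; any match yields an all-ones element.
+ */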
+static uint64_t vfcmp_common(CPULoongArchState *env,
+ FloatRelation cmp, uint32_t flags)
+{
+ uint64_t ret = 0;
+
+ switch (cmp) {
+ case float_relation_less:
+ ret = (flags & FCMP_LT);
+ break;
+ case float_relation_equal:
+ ret = (flags & FCMP_EQ);
+ break;
+ case float_relation_greater:
+ ret = (flags & FCMP_GT);
+ break;
+ case float_relation_unordered:
+ ret = (flags & FCMP_UN);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (ret) {
+ ret = -1;
+ }
+
+ return ret;
+}
+
+#define VFCMP(NAME, BIT, E, FN) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk, uint32_t flags) \
+{ \
+ int i; \
+ VReg t; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ vec_clear_cause(env); \
+    for (i = 0; i < LSX_LEN/BIT; i++) {                                \
+ FloatRelation cmp; \
+ cmp = FN(Vj->E(i), Vk->E(i), &env->fp_status); \
+ t.E(i) = vfcmp_common(env, cmp, flags); \
+ vec_update_fcsr0(env, GETPC()); \
+ } \
+ *Vd = t; \
+}
+
+VFCMP(vfcmp_c_s, 32, UW, float32_compare_quiet)
+VFCMP(vfcmp_s_s, 32, UW, float32_compare)
+VFCMP(vfcmp_c_d, 64, UD, float64_compare_quiet)
+VFCMP(vfcmp_s_d, 64, UD, float64_compare)
+
+void HELPER(vbitseli_b)(void *vd, void *vj, uint64_t imm, uint32_t v)
+{
+ int i;
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+
+ for (i = 0; i < 16; i++) {
+ Vd->B(i) = (~Vd->B(i) & Vj->B(i)) | (Vd->B(i) & imm);
+ }
+}
+
+/* Copy from target/arm/tcg/sve_helper.c */
+static inline bool do_match2(uint64_t n, uint64_t m0, uint64_t m1, int esz)
+{
+ uint64_t bits = 8 << esz;
+ uint64_t ones = dup_const(esz, 1);
+ uint64_t signs = ones << (bits - 1);
+ uint64_t cmp0, cmp1;
+
+ cmp1 = dup_const(esz, n);
+ cmp0 = cmp1 ^ m0;
+ cmp1 = cmp1 ^ m1;
+ cmp0 = (cmp0 - ones) & ~cmp0;
+ cmp1 = (cmp1 - ones) & ~cmp1;
+ return (cmp0 | cmp1) & signs;
+}
+
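+/*
+ * vsetanyeqz: set cd when any element of Vj is zero; vsetallnez sets cd
+ * when no element is zero.
+ */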
+#define SETANYEQZ(NAME, MO) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \
+{ \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ env->cf[cd & 0x7] = do_match2(0, Vj->D(0), Vj->D(1), MO); \
+}
+SETANYEQZ(vsetanyeqz_b, MO_8)
+SETANYEQZ(vsetanyeqz_h, MO_16)
+SETANYEQZ(vsetanyeqz_w, MO_32)
+SETANYEQZ(vsetanyeqz_d, MO_64)
+
+#define SETALLNEZ(NAME, MO) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \
+{ \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+    env->cf[cd & 0x7] = !do_match2(0, Vj->D(0), Vj->D(1), MO);       \
+}
+SETALLNEZ(vsetallnez_b, MO_8)
+SETALLNEZ(vsetallnez_h, MO_16)
+SETALLNEZ(vsetallnez_w, MO_32)
+SETALLNEZ(vsetallnez_d, MO_64)
+
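+/*
+ * vpackev/vpackod: interleave the even (odd) numbered elements of Vk and
+ * Vj, with Vk filling the even result lanes.  BIT is twice the element
+ * width, so the loop reads half the elements of each source.
+ */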
+#define VPACKEV(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(2 * i + 1) = Vj->E(2 * i); \
+        temp.E(2 * i) = Vk->E(2 * i);                             \
+ } \
+ *Vd = temp; \
+}
+
+VPACKEV(vpackev_b, 16, B)
+VPACKEV(vpackev_h, 32, H)
+VPACKEV(vpackev_w, 64, W)
+VPACKEV(vpackev_d, 128, D)
+
+#define VPACKOD(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(2 * i + 1) = Vj->E(2 * i + 1); \
+ temp.E(2 * i) = Vk->E(2 * i + 1); \
+ } \
+ *Vd = temp; \
+}
+
+VPACKOD(vpackod_b, 16, B)
+VPACKOD(vpackod_h, 32, H)
+VPACKOD(vpackod_w, 64, W)
+VPACKOD(vpackod_d, 128, D)
+
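+/*
+ * vpickev/vpickod: gather the even (odd) elements of Vk into the low half
+ * of the result and those of Vj into the high half.
+ */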
+#define VPICKEV(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i); \
+ temp.E(i) = Vk->E(2 * i); \
+ } \
+ *Vd = temp; \
+}
+
+VPICKEV(vpickev_b, 16, B)
+VPICKEV(vpickev_h, 32, H)
+VPICKEV(vpickev_w, 64, W)
+VPICKEV(vpickev_d, 128, D)
+
+#define VPICKOD(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i + 1); \
+ temp.E(i) = Vk->E(2 * i + 1); \
+ } \
+ *Vd = temp; \
+}
+
+VPICKOD(vpickod_b, 16, B)
+VPICKOD(vpickod_h, 32, H)
+VPICKOD(vpickod_w, 64, W)
+VPICKOD(vpickod_d, 128, D)
+
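+/*
+ * vilvl/vilvh: interleave the low (high) halves of Vk and Vj element by
+ * element, with Vk providing the even result lanes.
+ */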
+#define VILVL(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(2 * i + 1) = Vj->E(i); \
+ temp.E(2 * i) = Vk->E(i); \
+ } \
+ *Vd = temp; \
+}
+
+VILVL(vilvl_b, 16, B)
+VILVL(vilvl_h, 32, H)
+VILVL(vilvl_w, 64, W)
+VILVL(vilvl_d, 128, D)
+
+#define VILVH(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(2 * i + 1) = Vj->E(i + LSX_LEN/BIT); \
+ temp.E(2 * i) = Vk->E(i + LSX_LEN/BIT); \
+ } \
+ *Vd = temp; \
+}
+
+VILVH(vilvh_b, 16, B)
+VILVH(vilvh_h, 32, H)
+VILVH(vilvh_w, 64, W)
+VILVH(vilvh_d, 128, D)
+
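+/*
+ * vshuf.b: byte shuffle.  Each index byte of Va selects from the
+ * concatenation {Vj, Vk}; indices below 16 pick from Vk.
+ */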
+void HELPER(vshuf_b)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk, uint32_t va)
+{
+ int i, m;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+ VReg *Va = &(env->fpr[va].vreg);
+
+ m = LSX_LEN/8;
+    for (i = 0; i < m; i++) {
+ uint64_t k = (uint8_t)Va->B(i) % (2 * m);
+ temp.B(i) = k < m ? Vk->B(k) : Vj->B(k - m);
+ }
+ *Vd = temp;
+}
+
+#define VSHUF(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i, m; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ m = LSX_LEN/BIT; \
+ for (i = 0; i < m; i++) { \
+ uint64_t k = ((uint8_t) Vd->E(i)) % (2 * m); \
+ temp.E(i) = k < m ? Vk->E(k) : Vj->E(k - m); \
+ } \
+ *Vd = temp; \
+}
+
+VSHUF(vshuf_h, 16, H)
+VSHUF(vshuf_w, 32, W)
+VSHUF(vshuf_d, 64, D)
+
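+/*
+ * vshuf4i_{b,h,w}: within each aligned group of four elements, select
+ * elements of Vj according to the 2-bit fields of the immediate.
+ */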
+#define VSHUF4I(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(i) = Vj->E(((i) & 0xfc) + (((imm) >> \
+ (2 * ((i) & 0x03))) & 0x03)); \
+ } \
+ *Vd = temp; \
+}
+
+VSHUF4I(vshuf4i_b, 8, B)
+VSHUF4I(vshuf4i_h, 16, H)
+VSHUF4I(vshuf4i_w, 32, W)
+
+void HELPER(vshuf4i_d)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ VReg temp;
+ temp.D(0) = (imm & 2 ? Vj : Vd)->D(imm & 1);
+ temp.D(1) = (imm & 8 ? Vj : Vd)->D((imm >> 2) & 1);
+ *Vd = temp;
+}
+
+void HELPER(vpermi_w)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp.W(0) = Vj->W(imm & 0x3);
+ temp.W(1) = Vj->W((imm >> 2) & 0x3);
+ temp.W(2) = Vd->W((imm >> 4) & 0x3);
+ temp.W(3) = Vd->W((imm >> 6) & 0x3);
+ *Vd = temp;
+}
+
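+/*
+ * vextrins: copy element "extr" of Vj into position "ins" of Vd; both
+ * indices are taken from the immediate.
+ */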
+#define VEXTRINS(NAME, BIT, E, MASK) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int ins, extr; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ ins = (imm >> 4) & MASK; \
+ extr = imm & MASK; \
+ Vd->E(ins) = Vj->E(extr); \
+}
+
+VEXTRINS(vextrins_b, 8, B, 0xf)
+VEXTRINS(vextrins_h, 16, H, 0x7)
+VEXTRINS(vextrins_w, 32, W, 0x3)
+VEXTRINS(vextrins_d, 64, D, 0x1)
diff --git a/target/loongarch/machine.c b/target/loongarch/machine.c
index b1e523e..7adc1bd 100644
--- a/target/loongarch/machine.c
+++ b/target/loongarch/machine.c
@@ -10,6 +10,72 @@
#include "migration/cpu.h"
#include "internals.h"
+static const VMStateDescription vmstate_fpu_reg = {
+ .name = "fpu_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(UD(0), VReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_FPU_REGS(_field, _state, _start) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, 32, 0, \
+ vmstate_fpu_reg, fpr_t)
+
+static bool fpu_needed(void *opaque)
+{
+ LoongArchCPU *cpu = opaque;
+
+ return FIELD_EX64(cpu->env.cpucfg[2], CPUCFG2, FP);
+}
+
+static const VMStateDescription vmstate_fpu = {
+ .name = "cpu/fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = fpu_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_FPU_REGS(env.fpr, LoongArchCPU, 0),
+ VMSTATE_UINT32(env.fcsr0, LoongArchCPU),
+ VMSTATE_BOOL_ARRAY(env.cf, LoongArchCPU, 8),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_lsxh_reg = {
+ .name = "lsxh_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(UD(1), VReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_LSXH_REGS(_field, _state, _start) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, 32, 0, \
+ vmstate_lsxh_reg, fpr_t)
+
+static bool lsx_needed(void *opaque)
+{
+ LoongArchCPU *cpu = opaque;
+
+ return FIELD_EX64(cpu->env.cpucfg[2], CPUCFG2, LSX);
+}
+
+static const VMStateDescription vmstate_lsx = {
+ .name = "cpu/lsx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = lsx_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_LSXH_REGS(env.fpr, LoongArchCPU, 0),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
/* TLB state */
const VMStateDescription vmstate_tlb = {
.name = "cpu/tlb",
@@ -24,18 +90,13 @@ const VMStateDescription vmstate_tlb = {
};
/* LoongArch CPU state */
-
const VMStateDescription vmstate_loongarch_cpu = {
.name = "cpu",
- .version_id = 0,
- .minimum_version_id = 0,
+ .version_id = 1,
+ .minimum_version_id = 1,
.fields = (VMStateField[]) {
-
VMSTATE_UINTTL_ARRAY(env.gpr, LoongArchCPU, 32),
VMSTATE_UINTTL(env.pc, LoongArchCPU),
- VMSTATE_UINT64_ARRAY(env.fpr, LoongArchCPU, 32),
- VMSTATE_UINT32(env.fcsr0, LoongArchCPU),
- VMSTATE_BOOL_ARRAY(env.cf, LoongArchCPU, 8),
/* Remaining CSRs */
VMSTATE_UINT64(env.CSR_CRMD, LoongArchCPU),
@@ -99,4 +160,8 @@ const VMStateDescription vmstate_loongarch_cpu = {
VMSTATE_END_OF_LIST()
},
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_fpu,
+ &vmstate_lsx,
+ }
};
diff --git a/target/loongarch/meson.build b/target/loongarch/meson.build
index 9293a8a..1117a51 100644
--- a/target/loongarch/meson.build
+++ b/target/loongarch/meson.build
@@ -11,6 +11,7 @@ loongarch_tcg_ss.add(files(
'op_helper.c',
'translate.c',
'gdbstub.c',
+ 'lsx_helper.c',
))
loongarch_tcg_ss.add(zlib)
diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c
index 21d8607..ae53f5e 100644
--- a/target/loongarch/translate.c
+++ b/target/loongarch/translate.c
@@ -8,6 +8,8 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg/tcg-op.h"
+#include "tcg/tcg-op-gvec.h"
+
#include "exec/translator.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
@@ -21,7 +23,6 @@
/* Global register indices */
TCGv cpu_gpr[32], cpu_pc;
static TCGv cpu_lladdr, cpu_llval;
-TCGv_i64 cpu_fpr[32];
#include "exec/gen-icount.h"
@@ -29,16 +30,43 @@ TCGv_i64 cpu_fpr[32];
#define DISAS_EXIT DISAS_TARGET_1
#define DISAS_EXIT_UPDATE DISAS_TARGET_2
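+/*
+ * vec_full_offset() gives the byte offset of vector register regno inside
+ * CPULoongArchState; get_vreg64()/set_vreg64() access one 64-bit half of
+ * a register.
+ */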
+static inline int vec_full_offset(int regno)
+{
+ return offsetof(CPULoongArchState, fpr[regno]);
+}
+
+static inline void get_vreg64(TCGv_i64 dest, int regno, int index)
+{
+ tcg_gen_ld_i64(dest, cpu_env,
+ offsetof(CPULoongArchState, fpr[regno].vreg.D(index)));
+}
+
+static inline void set_vreg64(TCGv_i64 src, int regno, int index)
+{
+ tcg_gen_st_i64(src, cpu_env,
+ offsetof(CPULoongArchState, fpr[regno].vreg.D(index)));
+}
+
static inline int plus_1(DisasContext *ctx, int x)
{
return x + 1;
}
+static inline int shl_1(DisasContext *ctx, int x)
+{
+ return x << 1;
+}
+
static inline int shl_2(DisasContext *ctx, int x)
{
return x << 2;
}
+static inline int shl_3(DisasContext *ctx, int x)
+{
+ return x << 3;
+}
+
/*
* LoongArch the upper 32 bits are undefined ("can be any value").
* QEMU chooses to nanbox, because it is most likely to show guest bugs early.
@@ -71,6 +99,7 @@ static void loongarch_tr_init_disas_context(DisasContextBase *dcbase,
CPUState *cs)
{
int64_t bound;
+ CPULoongArchState *env = cs->env_ptr;
DisasContext *ctx = container_of(dcbase, DisasContext, base);
ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
@@ -85,6 +114,10 @@ static void loongarch_tr_init_disas_context(DisasContextBase *dcbase,
bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
+ if (FIELD_EX64(env->cpucfg[2], CPUCFG2, LSX)) {
+ ctx->vl = LSX_LEN;
+ }
+
ctx->zero = tcg_constant_tl(0);
}
@@ -157,6 +190,20 @@ static void gen_set_gpr(int reg_num, TCGv t, DisasExtend dst_ext)
}
}
+static TCGv get_fpr(DisasContext *ctx, int reg_num)
+{
+ TCGv t = tcg_temp_new();
+ tcg_gen_ld_i64(t, cpu_env,
+ offsetof(CPULoongArchState, fpr[reg_num].vreg.D(0)));
+ return t;
+}
+
+static void set_fpr(int reg_num, TCGv val)
+{
+ tcg_gen_st_i64(val, cpu_env,
+ offsetof(CPULoongArchState, fpr[reg_num].vreg.D(0)));
+}
+
#include "decode-insns.c.inc"
#include "insn_trans/trans_arith.c.inc"
#include "insn_trans/trans_shift.c.inc"
@@ -171,6 +218,7 @@ static void gen_set_gpr(int reg_num, TCGv t, DisasExtend dst_ext)
#include "insn_trans/trans_fmemory.c.inc"
#include "insn_trans/trans_branch.c.inc"
#include "insn_trans/trans_privileged.c.inc"
+#include "insn_trans/trans_lsx.c.inc"
static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
@@ -250,11 +298,6 @@ void loongarch_translate_init(void)
regnames[i]);
}
- for (i = 0; i < 32; i++) {
- int off = offsetof(CPULoongArchState, fpr[i]);
- cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env, off, fregnames[i]);
- }
-
cpu_pc = tcg_global_mem_new(cpu_env, offsetof(CPULoongArchState, pc), "pc");
cpu_lladdr = tcg_global_mem_new(cpu_env,
offsetof(CPULoongArchState, lladdr), "lladdr");
diff --git a/target/loongarch/translate.h b/target/loongarch/translate.h
index 67bc74c..7f60090 100644
--- a/target/loongarch/translate.h
+++ b/target/loongarch/translate.h
@@ -31,6 +31,7 @@ typedef struct DisasContext {
uint32_t opcode;
uint16_t mem_idx;
uint16_t plv;
+ int vl; /* Vector length */
TCGv zero;
} DisasContext;