aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChih-Min Chao <chihmin.chao@sifive.com>2020-05-07 23:25:46 -0700
committerChih-Min Chao <chihmin.chao@sifive.com>2020-05-11 19:39:46 -0700
commite8da0d62c39756f893d1d18ab31b51b12a347f0e (patch)
treebbbc4d2a0ddb9024e5f7345edaeb14721882a026
parent3baafbe3559fb62b8a4d3f13288593035e4502d3 (diff)
downloadspike-e8da0d62c39756f893d1d18ab31b51b12a347f0e.zip
spike-e8da0d62c39756f893d1d18ab31b51b12a347f0e.tar.gz
spike-e8da0d62c39756f893d1d18ab31b51b12a347f0e.tar.bz2
rvv: change to 0.9 ldst
Signed-off-by: Chih-Min Chao <chihmin.chao@sifive.com>
-rw-r--r--riscv/decode.h152
-rw-r--r--riscv/encoding.h329
-rw-r--r--riscv/insns/vlb_v.h3
-rw-r--r--riscv/insns/vlbff_v.h2
-rw-r--r--riscv/insns/vlbu_v.h3
-rw-r--r--riscv/insns/vlbuff_v.h2
-rw-r--r--riscv/insns/vle16_v.h2
-rw-r--r--riscv/insns/vle16ff_v.h2
-rw-r--r--riscv/insns/vle32_v.h2
-rw-r--r--riscv/insns/vle32ff_v.h2
-rw-r--r--riscv/insns/vle64_v.h2
-rw-r--r--riscv/insns/vle64ff_v.h2
-rw-r--r--riscv/insns/vle8_v.h2
-rw-r--r--riscv/insns/vle8ff_v.h2
-rw-r--r--riscv/insns/vle_v.h13
-rw-r--r--riscv/insns/vleff_v.h12
-rw-r--r--riscv/insns/vlh_v.h3
-rw-r--r--riscv/insns/vlhff_v.h2
-rw-r--r--riscv/insns/vlhu_v.h3
-rw-r--r--riscv/insns/vlhuff_v.h2
-rw-r--r--riscv/insns/vlsb_v.h3
-rw-r--r--riscv/insns/vlsbu_v.h3
-rw-r--r--riscv/insns/vlse16_v.h2
-rw-r--r--riscv/insns/vlse32_v.h2
-rw-r--r--riscv/insns/vlse64_v.h2
-rw-r--r--riscv/insns/vlse8_v.h2
-rw-r--r--riscv/insns/vlse_v.h13
-rw-r--r--riscv/insns/vlsh_v.h3
-rw-r--r--riscv/insns/vlshu_v.h3
-rw-r--r--riscv/insns/vlsw_v.h3
-rw-r--r--riscv/insns/vlswu_v.h3
-rw-r--r--riscv/insns/vlw_v.h3
-rw-r--r--riscv/insns/vlwff_v.h2
-rw-r--r--riscv/insns/vlwu_v.h3
-rw-r--r--riscv/insns/vlwuff_v.h2
-rw-r--r--riscv/insns/vlxb_v.h4
-rw-r--r--riscv/insns/vlxbu_v.h4
-rw-r--r--riscv/insns/vlxe_v.h12
-rw-r--r--riscv/insns/vlxei16_v.h24
-rw-r--r--riscv/insns/vlxei32_v.h24
-rw-r--r--riscv/insns/vlxei64_v.h24
-rw-r--r--riscv/insns/vlxei8_v.h24
-rw-r--r--riscv/insns/vlxh_v.h4
-rw-r--r--riscv/insns/vlxhu_v.h4
-rw-r--r--riscv/insns/vlxw_v.h4
-rw-r--r--riscv/insns/vlxwu_v.h4
-rw-r--r--riscv/insns/vsb_v.h3
-rw-r--r--riscv/insns/vse16_v.h2
-rw-r--r--riscv/insns/vse32_v.h2
-rw-r--r--riscv/insns/vse64_v.h2
-rw-r--r--riscv/insns/vse8_v.h2
-rw-r--r--riscv/insns/vsh_v.h3
-rw-r--r--riscv/insns/vssb_v.h3
-rw-r--r--riscv/insns/vsse16_v.h2
-rw-r--r--riscv/insns/vsse32_v.h2
-rw-r--r--riscv/insns/vsse64_v.h2
-rw-r--r--riscv/insns/vsse8_v.h2
-rw-r--r--riscv/insns/vsse_v.h13
-rw-r--r--riscv/insns/vssh_v.h3
-rw-r--r--riscv/insns/vssw_v.h3
-rw-r--r--riscv/insns/vsuxb_v.h4
-rw-r--r--riscv/insns/vsuxe_v.h13
-rw-r--r--riscv/insns/vsuxei16_v.h24
-rw-r--r--riscv/insns/vsuxei32_v.h24
-rw-r--r--riscv/insns/vsuxei64_v.h24
-rw-r--r--riscv/insns/vsuxei8_v.h24
-rw-r--r--riscv/insns/vsuxh_v.h4
-rw-r--r--riscv/insns/vsuxw_v.h4
-rw-r--r--riscv/insns/vsw_v.h3
-rw-r--r--riscv/insns/vsxb_v.h4
-rw-r--r--riscv/insns/vsxe_v.h14
-rw-r--r--riscv/insns/vsxei16_v.h24
-rw-r--r--riscv/insns/vsxei32_v.h24
-rw-r--r--riscv/insns/vsxei64_v.h24
-rw-r--r--riscv/insns/vsxei8_v.h24
-rw-r--r--riscv/insns/vsxh_v.h4
-rw-r--r--riscv/insns/vsxw_v.h4
-rw-r--r--riscv/riscv.mk.in76
-rw-r--r--spike_main/disasm.cc123
79 files changed, 696 insertions, 516 deletions
diff --git a/riscv/decode.h b/riscv/decode.h
index 260956c..21428ed 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -1506,58 +1506,44 @@ VI_LOOP_END
reg_t vreg_inx = lmul_inx * elems_per_vreg + strip_index * elems_per_strip + index_in_strip;
-#define VI_DUPLICATE_VREG(v, vlmax) \
-reg_t index[vlmax]; \
-for (reg_t i = 0; i < vlmax && P.VU.vl != 0; ++i) { \
- switch(P.VU.vsew) { \
+#define VI_DUPLICATE_VREG(reg_num, idx_sew) \
+reg_t index[P.VU.vlmax]; \
+for (reg_t i = 0; i < P.VU.vlmax && P.VU.vl != 0; ++i) { \
+ switch(idx_sew) { \
case e8: \
- index[i] = P.VU.elt<uint8_t>(v, i); \
+ index[i] = P.VU.elt<uint8_t>(reg_num, i); \
break; \
case e16: \
- index[i] = P.VU.elt<uint16_t>(v, i); \
+ index[i] = P.VU.elt<uint16_t>(reg_num, i); \
break; \
case e32: \
- index[i] = P.VU.elt<uint32_t>(v, i); \
+ index[i] = P.VU.elt<uint32_t>(reg_num, i); \
break; \
case e64: \
- index[i] = P.VU.elt<uint64_t>(v, i); \
+ index[i] = P.VU.elt<uint64_t>(reg_num, i); \
break; \
} \
}
-#define VI_ST_COMMON(stride, offset, st_width, elt_byte, is_seg) \
+#define VI_LD(stride, offset, ld_width) \
const reg_t nf = insn.v_nf() + 1; \
const reg_t vl = P.VU.vl; \
const reg_t baseAddr = RS1; \
- const reg_t vs3 = insn.rd(); \
+ const reg_t vd = insn.rd(); \
require((nf * P.VU.vlmul) <= (NVPR / 4) && \
- vs3 + nf * P.VU.vlmul <= NVPR); \
- if (!is_seg) \
- require(nf == 1); \
+ (vd + nf * P.VU.vlmul) <= NVPR); \
const reg_t vlmul = P.VU.vlmul; \
for (reg_t i = 0; i < vl; ++i) { \
- VI_STRIP(i) \
VI_ELEMENT_SKIP(i); \
+ VI_STRIP(i); \
+ P.VU.vstart = i; \
for (reg_t fn = 0; fn < nf; ++fn) { \
- st_width##_t val = 0; \
- switch (P.VU.vsew) { \
- case e8: \
- val = P.VU.elt<uint8_t>(vs3 + fn * vlmul, vreg_inx); \
- break; \
- case e16: \
- val = P.VU.elt<uint16_t>(vs3 + fn * vlmul, vreg_inx); \
- break; \
- case e32: \
- val = P.VU.elt<uint32_t>(vs3 + fn * vlmul, vreg_inx); \
- break; \
- default: \
- val = P.VU.elt<uint64_t>(vs3 + fn * vlmul, vreg_inx); \
- break; \
- } \
- MMU.store_##st_width(baseAddr + (stride) + (offset) * elt_byte, val); \
+ ld_width##_t val = MMU.load_##ld_width( \
+ baseAddr + (stride) + (offset) * sizeof(ld_width##_t)); \
+ P.VU.elt<ld_width##_t>(vd + fn * vlmul, vreg_inx, true) = val; \
} \
} \
- P.VU.vstart = 0;
+ P.VU.vstart = 0;
#define VI_EEW(mew, width) \
reg_t base = mew? 128 : 8; \
@@ -1566,7 +1552,8 @@ for (reg_t i = 0; i < vlmax && P.VU.vl != 0; ++i) { \
P.VU.vemul = (P.VU.veew/P.VU.vsew) * P.VU.vlmul; \
assert((P.VU.veew/P.VU.vemul) == (P.VU.vsew/P.VU.vlmul));
-#define VI_LD_COMMON(stride, offset, ld_width, elt_byte, is_seg) \
+#define VI_LD_INDEX(stride, offset, ld_width, is_seg) \
+ VI_CHECK_LD_INDEX; \
const reg_t nf = insn.v_nf() + 1; \
const reg_t vl = P.VU.vl; \
const reg_t baseAddr = RS1; \
@@ -1578,13 +1565,17 @@ for (reg_t i = 0; i < vlmax && P.VU.vl != 0; ++i) { \
(vd + nf * P.VU.vlmul) <= NVPR); \
if (!is_seg) \
require(nf == 1); \
+ if (nf >= 2) \
+ require(!is_overlapped(vd, nf, insn.rs2(), 1)); \
const reg_t vlmul = P.VU.vlmul; \
for (reg_t i = 0; i < vl; ++i) { \
VI_ELEMENT_SKIP(i); \
VI_STRIP(i); \
+ P.VU.vstart = i; \
for (reg_t fn = 0; fn < nf; ++fn) { \
- ld_width##_t val = MMU.load_##ld_width(baseAddr + (stride) + (offset) * elt_byte); \
- switch(P.VU.veew){ \
+ ld_width##_t val = MMU.load_##ld_width( \
+ baseAddr + (stride) + (offset) * sizeof(ld_width##_t)); \
+ switch(P.VU.vsew){ \
case e8: \
P.VU.elt<uint8_t>(vd + fn * vlmul, vreg_inx, true) = val; \
break; \
@@ -1602,30 +1593,66 @@ for (reg_t i = 0; i < vlmax && P.VU.vl != 0; ++i) { \
} \
P.VU.vstart = 0;
-#define VI_LD(stride, offset, ld_width, elt_byte, is_seg) \
- VI_CHECK_SXX; \
- VI_LD_COMMON(stride, offset, ld_width, elt_byte, is_seg)
-
-#define VI_LD_INDEX(stride, offset, ld_width, elt_byte, is_seg) \
- VI_CHECK_LD_INDEX; \
- VI_LD_COMMON(stride, offset, ld_width, elt_byte, is_seg) \
- if (nf >= 2) \
- require(!is_overlapped(vd, nf, insn.rs2(), 1));
-
-#define VI_ST(stride, offset, st_width, elt_byte, is_seg) \
+#define VI_ST(stride, offset, st_width) \
VI_CHECK_STORE_SXX; \
- VI_ST_COMMON(stride, offset, st_width, elt_byte, is_seg) \
+ const reg_t nf = insn.v_nf() + 1; \
+ const reg_t vl = P.VU.vl; \
+ const reg_t baseAddr = RS1; \
+ const reg_t vs3 = insn.rd(); \
+ require((nf * P.VU.vlmul) <= (NVPR / 4) && \
+ vs3 + nf * P.VU.vlmul <= NVPR); \
+ const reg_t vlmul = P.VU.vlmul; \
+ for (reg_t i = 0; i < vl; ++i) { \
+ VI_STRIP(i) \
+ VI_ELEMENT_SKIP(i); \
+ P.VU.vstart = i; \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ st_width##_t val = P.VU.elt<st_width##_t>(vs3 + fn * vlmul, vreg_inx); \
+ MMU.store_##st_width( \
+ baseAddr + (stride) + (offset) * sizeof(st_width##_t), val); \
+ } \
+ } \
+ P.VU.vstart = 0;
-#define VI_ST_INDEX(stride, offset, st_width, elt_byte, is_seg) \
+#define VI_ST_INDEX(stride, offset, st_width, is_seg) \
VI_CHECK_ST_INDEX; \
- VI_ST_COMMON(stride, offset, st_width, elt_byte, is_seg) \
-
-#define VI_LDST_FF(itype, tsew, is_seg) \
- require(p->VU.vsew >= e##tsew && p->VU.vsew <= e64); \
const reg_t nf = insn.v_nf() + 1; \
- require((nf * P.VU.vlmul) <= (NVPR / 4)); \
+ const reg_t vl = P.VU.vl; \
+ const reg_t baseAddr = RS1; \
+ const reg_t vs3 = insn.rd(); \
+ require((nf * P.VU.vlmul) <= (NVPR / 4) && \
+ vs3 + nf * P.VU.vlmul <= NVPR); \
if (!is_seg) \
require(nf == 1); \
+ const reg_t vlmul = P.VU.vlmul; \
+ for (reg_t i = 0; i < vl; ++i) { \
+ VI_STRIP(i) \
+ VI_ELEMENT_SKIP(i); \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ st_width##_t val = 0; \
+ switch (P.VU.vsew) { \
+ case e8: \
+ val = P.VU.elt<uint8_t>(vs3 + fn * vlmul, vreg_inx); \
+ break; \
+ case e16: \
+ val = P.VU.elt<uint16_t>(vs3 + fn * vlmul, vreg_inx); \
+ break; \
+ case e32: \
+ val = P.VU.elt<uint32_t>(vs3 + fn * vlmul, vreg_inx); \
+ break; \
+ default: \
+ val = P.VU.elt<uint64_t>(vs3 + fn * vlmul, vreg_inx); \
+ break; \
+ } \
+ MMU.store_##st_width( \
+ baseAddr + (stride) + (offset) * sizeof(st_width##_t), val); \
+ } \
+ } \
+ P.VU.vstart = 0;
+
+#define VI_LDST_FF(ld_type) \
+ const reg_t nf = insn.v_nf() + 1; \
+ require((nf * P.VU.vlmul) <= (NVPR / 4)); \
VI_CHECK_SXX; \
const reg_t sew = p->VU.vsew; \
const reg_t vl = p->VU.vl; \
@@ -1642,9 +1669,10 @@ for (reg_t i = 0; i < vlmax && P.VU.vl != 0; ++i) { \
VI_ELEMENT_SKIP(i); \
\
for (reg_t fn = 0; fn < nf; ++fn) { \
- itype##64_t val; \
+ uint64_t val; \
try { \
- val = MMU.load_##itype##tsew(baseAddr + (i * nf + fn) * (tsew / 8)); \
+ val = MMU.load_##ld_type( \
+ baseAddr + (i * nf + fn) * sizeof(ld_type##_t)); \
} catch (trap_t& t) { \
if (i == 0) \
throw; /* Only take exception on zeroth element */ \
@@ -1653,21 +1681,7 @@ for (reg_t i = 0; i < vlmax && P.VU.vl != 0; ++i) { \
P.VU.vl = i; \
break; \
} \
- \
- switch (P.VU.veew) { \
- case e8: \
- p->VU.elt<uint8_t>(rd_num + fn * vlmul, vreg_inx, true) = val; \
- break; \
- case e16: \
- p->VU.elt<uint16_t>(rd_num + fn * vlmul, vreg_inx, true) = val; \
- break; \
- case e32: \
- p->VU.elt<uint32_t>(rd_num + fn * vlmul, vreg_inx, true) = val; \
- break; \
- case e64: \
- p->VU.elt<uint64_t>(rd_num + fn * vlmul, vreg_inx, true) = val; \
- break; \
- } \
+ p->VU.elt<ld_type##_t>(rd_num + fn * vlmul, vreg_inx, true) = val; \
} \
\
if (early_stop) { \
diff --git a/riscv/encoding.h b/riscv/encoding.h
index 53b770d..9bd0560 100644
--- a/riscv/encoding.h
+++ b/riscv/encoding.h
@@ -65,6 +65,7 @@
#define DCSR_CAUSE_DEBUGINT 3
#define DCSR_CAUSE_STEP 4
#define DCSR_CAUSE_HALT 5
+#define DCSR_CAUSE_GROUP 6
#define MCONTROL_TYPE(xlen) (0xfULL<<((xlen)-4))
#define MCONTROL_DMODE(xlen) (1ULL<<((xlen)-5))
@@ -898,97 +899,137 @@
#define MASK_VSETVLI 0x8000707f
#define MATCH_VSETVL 0x80007057
#define MASK_VSETVL 0xfe00707f
-#define MATCH_VLB_V 0x10000007
-#define MASK_VLB_V 0x1df0707f
-#define MATCH_VLH_V 0x10005007
-#define MASK_VLH_V 0x1df0707f
-#define MATCH_VLW_V 0x10006007
-#define MASK_VLW_V 0x1df0707f
-#define MATCH_VLE_V 0x7007
-#define MASK_VLE_V 0x1df0707f
-#define MATCH_VLBU_V 0x7
-#define MASK_VLBU_V 0x1df0707f
-#define MATCH_VLHU_V 0x5007
-#define MASK_VLHU_V 0x1df0707f
-#define MATCH_VLWU_V 0x6007
-#define MASK_VLWU_V 0x1df0707f
-#define MATCH_VSB_V 0x27
-#define MASK_VSB_V 0x1df0707f
-#define MATCH_VSH_V 0x5027
-#define MASK_VSH_V 0x1df0707f
-#define MATCH_VSW_V 0x6027
-#define MASK_VSW_V 0x1df0707f
-#define MATCH_VSE_V 0x7027
-#define MASK_VSE_V 0x1df0707f
-#define MATCH_VLSB_V 0x18000007
-#define MASK_VLSB_V 0x1c00707f
-#define MATCH_VLSH_V 0x18005007
-#define MASK_VLSH_V 0x1c00707f
-#define MATCH_VLSW_V 0x18006007
-#define MASK_VLSW_V 0x1c00707f
-#define MATCH_VLSE_V 0x8007007
-#define MASK_VLSE_V 0x1c00707f
-#define MATCH_VLSBU_V 0x8000007
-#define MASK_VLSBU_V 0x1c00707f
-#define MATCH_VLSHU_V 0x8005007
-#define MASK_VLSHU_V 0x1c00707f
-#define MATCH_VLSWU_V 0x8006007
-#define MASK_VLSWU_V 0x1c00707f
-#define MATCH_VSSB_V 0x8000027
-#define MASK_VSSB_V 0x1c00707f
-#define MATCH_VSSH_V 0x8005027
-#define MASK_VSSH_V 0x1c00707f
-#define MATCH_VSSW_V 0x8006027
-#define MASK_VSSW_V 0x1c00707f
-#define MATCH_VSSE_V 0x8007027
-#define MASK_VSSE_V 0x1c00707f
-#define MATCH_VLXB_V 0x1c000007
-#define MASK_VLXB_V 0x1c00707f
-#define MATCH_VLXH_V 0x1c005007
-#define MASK_VLXH_V 0x1c00707f
-#define MATCH_VLXW_V 0x1c006007
-#define MASK_VLXW_V 0x1c00707f
-#define MATCH_VLXE_V 0xc007007
-#define MASK_VLXE_V 0x1c00707f
-#define MATCH_VLXBU_V 0xc000007
-#define MASK_VLXBU_V 0x1c00707f
-#define MATCH_VLXHU_V 0xc005007
-#define MASK_VLXHU_V 0x1c00707f
-#define MATCH_VLXWU_V 0xc006007
-#define MASK_VLXWU_V 0x1c00707f
-#define MATCH_VSXB_V 0xc000027
-#define MASK_VSXB_V 0x1c00707f
-#define MATCH_VSXH_V 0xc005027
-#define MASK_VSXH_V 0x1c00707f
-#define MATCH_VSXW_V 0xc006027
-#define MASK_VSXW_V 0x1c00707f
-#define MATCH_VSXE_V 0xc007027
-#define MASK_VSXE_V 0x1c00707f
-#define MATCH_VSUXB_V 0x1c000027
-#define MASK_VSUXB_V 0xfc00707f
-#define MATCH_VSUXH_V 0x1c005027
-#define MASK_VSUXH_V 0xfc00707f
-#define MATCH_VSUXW_V 0x1c006027
-#define MASK_VSUXW_V 0xfc00707f
-#define MATCH_VSUXE_V 0x1c007027
-#define MASK_VSUXE_V 0xfc00707f
-#define MATCH_VLBFF_V 0x11000007
-#define MASK_VLBFF_V 0x1df0707f
-#define MATCH_VLHFF_V 0x11005007
-#define MASK_VLHFF_V 0x1df0707f
-#define MATCH_VLWFF_V 0x11006007
-#define MASK_VLWFF_V 0x1df0707f
-#define MATCH_VLEFF_V 0x1007007
-#define MASK_VLEFF_V 0x1df0707f
-#define MATCH_VLBUFF_V 0x1000007
-#define MASK_VLBUFF_V 0x1df0707f
-#define MATCH_VLHUFF_V 0x1005007
-#define MASK_VLHUFF_V 0x1df0707f
-#define MATCH_VLWUFF_V 0x1006007
-#define MASK_VLWUFF_V 0x1df0707f
-#define MATCH_VL1R_V 0x2807007
+#define MATCH_VLE8_V 0x7
+#define MASK_VLE8_V 0x1df0707f
+#define MATCH_VLE16_V 0x5007
+#define MASK_VLE16_V 0x1df0707f
+#define MATCH_VLE32_V 0x6007
+#define MASK_VLE32_V 0x1df0707f
+#define MATCH_VLE64_V 0x7007
+#define MASK_VLE64_V 0x1df0707f
+#define MATCH_VLE128_V 0x10000007
+#define MASK_VLE128_V 0x1df0707f
+#define MATCH_VLE256_V 0x10005007
+#define MASK_VLE256_V 0x1df0707f
+#define MATCH_VLE512_V 0x10006007
+#define MASK_VLE512_V 0x1df0707f
+#define MATCH_VLE1024_V 0x10007007
+#define MASK_VLE1024_V 0x1df0707f
+#define MATCH_VSE8_V 0x27
+#define MASK_VSE8_V 0x1df0707f
+#define MATCH_VSE16_V 0x5027
+#define MASK_VSE16_V 0x1df0707f
+#define MATCH_VSE32_V 0x6027
+#define MASK_VSE32_V 0x1df0707f
+#define MATCH_VSE64_V 0x7027
+#define MASK_VSE64_V 0x1df0707f
+#define MATCH_VSE128_V 0x10000027
+#define MASK_VSE128_V 0x1df0707f
+#define MATCH_VSE256_V 0x10005027
+#define MASK_VSE256_V 0x1df0707f
+#define MATCH_VSE512_V 0x10006027
+#define MASK_VSE512_V 0x1df0707f
+#define MATCH_VSE1024_V 0x10007027
+#define MASK_VSE1024_V 0x1df0707f
+#define MATCH_VLSE8_V 0x8000007
+#define MASK_VLSE8_V 0x1c00707f
+#define MATCH_VLSE16_V 0x8005007
+#define MASK_VLSE16_V 0x1c00707f
+#define MATCH_VLSE32_V 0x8006007
+#define MASK_VLSE32_V 0x1c00707f
+#define MATCH_VLSE64_V 0x8007007
+#define MASK_VLSE64_V 0x1c00707f
+#define MATCH_VLSE128_V 0x18000007
+#define MASK_VLSE128_V 0x1c00707f
+#define MATCH_VLSE256_V 0x18005007
+#define MASK_VLSE256_V 0x1c00707f
+#define MATCH_VLSE512_V 0x18006007
+#define MASK_VLSE512_V 0x1c00707f
+#define MATCH_VLSE1024_V 0x18007007
+#define MASK_VLSE1024_V 0x1c00707f
+#define MATCH_VSSE8_V 0x8000027
+#define MASK_VSSE8_V 0x1c00707f
+#define MATCH_VSSE16_V 0x8005027
+#define MASK_VSSE16_V 0x1c00707f
+#define MATCH_VSSE32_V 0x8006027
+#define MASK_VSSE32_V 0x1c00707f
+#define MATCH_VSSE64_V 0x8007027
+#define MASK_VSSE64_V 0x1c00707f
+#define MATCH_VSSE128_V 0x18000027
+#define MASK_VSSE128_V 0x1c00707f
+#define MATCH_VSSE256_V 0x18005027
+#define MASK_VSSE256_V 0x1c00707f
+#define MATCH_VSSE512_V 0x18006027
+#define MASK_VSSE512_V 0x1c00707f
+#define MATCH_VSSE1024_V 0x18007027
+#define MASK_VSSE1024_V 0x1c00707f
+#define MATCH_VLXEI8_V 0xc000007
+#define MASK_VLXEI8_V 0x1c00707f
+#define MATCH_VLXEI16_V 0xc005007
+#define MASK_VLXEI16_V 0x1c00707f
+#define MATCH_VLXEI32_V 0xc006007
+#define MASK_VLXEI32_V 0x1c00707f
+#define MATCH_VLXEI64_V 0xc007007
+#define MASK_VLXEI64_V 0x1c00707f
+#define MATCH_VLXEI128_V 0x1c000007
+#define MASK_VLXEI128_V 0x1c00707f
+#define MATCH_VLXEI256_V 0x1c005007
+#define MASK_VLXEI256_V 0x1c00707f
+#define MATCH_VLXEI512_V 0x1c006007
+#define MASK_VLXEI512_V 0x1c00707f
+#define MATCH_VLXEI1024_V 0x1c007007
+#define MASK_VLXEI1024_V 0x1c00707f
+#define MATCH_VSXEI8_V 0xc000027
+#define MASK_VSXEI8_V 0x1c00707f
+#define MATCH_VSXEI16_V 0xc005027
+#define MASK_VSXEI16_V 0x1c00707f
+#define MATCH_VSXEI32_V 0xc006027
+#define MASK_VSXEI32_V 0x1c00707f
+#define MATCH_VSXEI64_V 0xc007027
+#define MASK_VSXEI64_V 0x1c00707f
+#define MATCH_VSXEI128_V 0x1c000027
+#define MASK_VSXEI128_V 0x1c00707f
+#define MATCH_VSXEI256_V 0x1c005027
+#define MASK_VSXEI256_V 0x1c00707f
+#define MATCH_VSXEI512_V 0x1c006027
+#define MASK_VSXEI512_V 0x1c00707f
+#define MATCH_VSXEI1024_V 0x1c007027
+#define MASK_VSXEI1024_V 0x1c00707f
+#define MATCH_VSUXEI8_V 0x4000027
+#define MASK_VSUXEI8_V 0x1c00707f
+#define MATCH_VSUXEI16_V 0x4005027
+#define MASK_VSUXEI16_V 0x1c00707f
+#define MATCH_VSUXEI32_V 0x4006027
+#define MASK_VSUXEI32_V 0x1c00707f
+#define MATCH_VSUXEI64_V 0x4007027
+#define MASK_VSUXEI64_V 0x1c00707f
+#define MATCH_VSUXEI128_V 0x14000027
+#define MASK_VSUXEI128_V 0x1c00707f
+#define MATCH_VSUXEI256_V 0x14005027
+#define MASK_VSUXEI256_V 0x1c00707f
+#define MATCH_VSUXEI512_V 0x14006027
+#define MASK_VSUXEI512_V 0x1c00707f
+#define MATCH_VSUXEI1024_V 0x14007027
+#define MASK_VSUXEI1024_V 0x1c00707f
+#define MATCH_VLE8FF_V 0x1000007
+#define MASK_VLE8FF_V 0x1df0707f
+#define MATCH_VLE16FF_V 0x1005007
+#define MASK_VLE16FF_V 0x1df0707f
+#define MATCH_VLE32FF_V 0x1006007
+#define MASK_VLE32FF_V 0x1df0707f
+#define MATCH_VLE64FF_V 0x1007007
+#define MASK_VLE64FF_V 0x1df0707f
+#define MATCH_VLE128FF_V 0x11000007
+#define MASK_VLE128FF_V 0x1df0707f
+#define MATCH_VLE256FF_V 0x11005007
+#define MASK_VLE256FF_V 0x1df0707f
+#define MATCH_VLE512FF_V 0x11006007
+#define MASK_VLE512FF_V 0x1df0707f
+#define MATCH_VLE1024FF_V 0x11007007
+#define MASK_VLE1024FF_V 0x1df0707f
+#define MATCH_VL1R_V 0x2800007
#define MASK_VL1R_V 0xfff0707f
-#define MATCH_VS1R_V 0x2807027
+#define MATCH_VS1R_V 0x2800027
#define MASK_VS1R_V 0xfff0707f
#define MATCH_VFADD_VF 0x5057
#define MASK_VFADD_VF 0xfc00707f
@@ -2268,50 +2309,70 @@ DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
DECLARE_INSN(vsetvli, MATCH_VSETVLI, MASK_VSETVLI)
DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL)
-DECLARE_INSN(vlb_v, MATCH_VLB_V, MASK_VLB_V)
-DECLARE_INSN(vlh_v, MATCH_VLH_V, MASK_VLH_V)
-DECLARE_INSN(vlw_v, MATCH_VLW_V, MASK_VLW_V)
-DECLARE_INSN(vle_v, MATCH_VLE_V, MASK_VLE_V)
-DECLARE_INSN(vlbu_v, MATCH_VLBU_V, MASK_VLBU_V)
-DECLARE_INSN(vlhu_v, MATCH_VLHU_V, MASK_VLHU_V)
-DECLARE_INSN(vlwu_v, MATCH_VLWU_V, MASK_VLWU_V)
-DECLARE_INSN(vsb_v, MATCH_VSB_V, MASK_VSB_V)
-DECLARE_INSN(vsh_v, MATCH_VSH_V, MASK_VSH_V)
-DECLARE_INSN(vsw_v, MATCH_VSW_V, MASK_VSW_V)
-DECLARE_INSN(vse_v, MATCH_VSE_V, MASK_VSE_V)
-DECLARE_INSN(vlsb_v, MATCH_VLSB_V, MASK_VLSB_V)
-DECLARE_INSN(vlsh_v, MATCH_VLSH_V, MASK_VLSH_V)
-DECLARE_INSN(vlsw_v, MATCH_VLSW_V, MASK_VLSW_V)
-DECLARE_INSN(vlse_v, MATCH_VLSE_V, MASK_VLSE_V)
-DECLARE_INSN(vlsbu_v, MATCH_VLSBU_V, MASK_VLSBU_V)
-DECLARE_INSN(vlshu_v, MATCH_VLSHU_V, MASK_VLSHU_V)
-DECLARE_INSN(vlswu_v, MATCH_VLSWU_V, MASK_VLSWU_V)
-DECLARE_INSN(vssb_v, MATCH_VSSB_V, MASK_VSSB_V)
-DECLARE_INSN(vssh_v, MATCH_VSSH_V, MASK_VSSH_V)
-DECLARE_INSN(vssw_v, MATCH_VSSW_V, MASK_VSSW_V)
-DECLARE_INSN(vsse_v, MATCH_VSSE_V, MASK_VSSE_V)
-DECLARE_INSN(vlxb_v, MATCH_VLXB_V, MASK_VLXB_V)
-DECLARE_INSN(vlxh_v, MATCH_VLXH_V, MASK_VLXH_V)
-DECLARE_INSN(vlxw_v, MATCH_VLXW_V, MASK_VLXW_V)
-DECLARE_INSN(vlxe_v, MATCH_VLXE_V, MASK_VLXE_V)
-DECLARE_INSN(vlxbu_v, MATCH_VLXBU_V, MASK_VLXBU_V)
-DECLARE_INSN(vlxhu_v, MATCH_VLXHU_V, MASK_VLXHU_V)
-DECLARE_INSN(vlxwu_v, MATCH_VLXWU_V, MASK_VLXWU_V)
-DECLARE_INSN(vsxb_v, MATCH_VSXB_V, MASK_VSXB_V)
-DECLARE_INSN(vsxh_v, MATCH_VSXH_V, MASK_VSXH_V)
-DECLARE_INSN(vsxw_v, MATCH_VSXW_V, MASK_VSXW_V)
-DECLARE_INSN(vsxe_v, MATCH_VSXE_V, MASK_VSXE_V)
-DECLARE_INSN(vsuxb_v, MATCH_VSUXB_V, MASK_VSUXB_V)
-DECLARE_INSN(vsuxh_v, MATCH_VSUXH_V, MASK_VSUXH_V)
-DECLARE_INSN(vsuxw_v, MATCH_VSUXW_V, MASK_VSUXW_V)
-DECLARE_INSN(vsuxe_v, MATCH_VSUXE_V, MASK_VSUXE_V)
-DECLARE_INSN(vlbff_v, MATCH_VLBFF_V, MASK_VLBFF_V)
-DECLARE_INSN(vlhff_v, MATCH_VLHFF_V, MASK_VLHFF_V)
-DECLARE_INSN(vlwff_v, MATCH_VLWFF_V, MASK_VLWFF_V)
-DECLARE_INSN(vleff_v, MATCH_VLEFF_V, MASK_VLEFF_V)
-DECLARE_INSN(vlbuff_v, MATCH_VLBUFF_V, MASK_VLBUFF_V)
-DECLARE_INSN(vlhuff_v, MATCH_VLHUFF_V, MASK_VLHUFF_V)
-DECLARE_INSN(vlwuff_v, MATCH_VLWUFF_V, MASK_VLWUFF_V)
+DECLARE_INSN(vle8_v, MATCH_VLE8_V, MASK_VLE8_V)
+DECLARE_INSN(vle16_v, MATCH_VLE16_V, MASK_VLE16_V)
+DECLARE_INSN(vle32_v, MATCH_VLE32_V, MASK_VLE32_V)
+DECLARE_INSN(vle64_v, MATCH_VLE64_V, MASK_VLE64_V)
+DECLARE_INSN(vle128_v, MATCH_VLE128_V, MASK_VLE128_V)
+DECLARE_INSN(vle256_v, MATCH_VLE256_V, MASK_VLE256_V)
+DECLARE_INSN(vle512_v, MATCH_VLE512_V, MASK_VLE512_V)
+DECLARE_INSN(vle1024_v, MATCH_VLE1024_V, MASK_VLE1024_V)
+DECLARE_INSN(vse8_v, MATCH_VSE8_V, MASK_VSE8_V)
+DECLARE_INSN(vse16_v, MATCH_VSE16_V, MASK_VSE16_V)
+DECLARE_INSN(vse32_v, MATCH_VSE32_V, MASK_VSE32_V)
+DECLARE_INSN(vse64_v, MATCH_VSE64_V, MASK_VSE64_V)
+DECLARE_INSN(vse128_v, MATCH_VSE128_V, MASK_VSE128_V)
+DECLARE_INSN(vse256_v, MATCH_VSE256_V, MASK_VSE256_V)
+DECLARE_INSN(vse512_v, MATCH_VSE512_V, MASK_VSE512_V)
+DECLARE_INSN(vse1024_v, MATCH_VSE1024_V, MASK_VSE1024_V)
+DECLARE_INSN(vlse8_v, MATCH_VLSE8_V, MASK_VLSE8_V)
+DECLARE_INSN(vlse16_v, MATCH_VLSE16_V, MASK_VLSE16_V)
+DECLARE_INSN(vlse32_v, MATCH_VLSE32_V, MASK_VLSE32_V)
+DECLARE_INSN(vlse64_v, MATCH_VLSE64_V, MASK_VLSE64_V)
+DECLARE_INSN(vlse128_v, MATCH_VLSE128_V, MASK_VLSE128_V)
+DECLARE_INSN(vlse256_v, MATCH_VLSE256_V, MASK_VLSE256_V)
+DECLARE_INSN(vlse512_v, MATCH_VLSE512_V, MASK_VLSE512_V)
+DECLARE_INSN(vlse1024_v, MATCH_VLSE1024_V, MASK_VLSE1024_V)
+DECLARE_INSN(vsse8_v, MATCH_VSSE8_V, MASK_VSSE8_V)
+DECLARE_INSN(vsse16_v, MATCH_VSSE16_V, MASK_VSSE16_V)
+DECLARE_INSN(vsse32_v, MATCH_VSSE32_V, MASK_VSSE32_V)
+DECLARE_INSN(vsse64_v, MATCH_VSSE64_V, MASK_VSSE64_V)
+DECLARE_INSN(vsse128_v, MATCH_VSSE128_V, MASK_VSSE128_V)
+DECLARE_INSN(vsse256_v, MATCH_VSSE256_V, MASK_VSSE256_V)
+DECLARE_INSN(vsse512_v, MATCH_VSSE512_V, MASK_VSSE512_V)
+DECLARE_INSN(vsse1024_v, MATCH_VSSE1024_V, MASK_VSSE1024_V)
+DECLARE_INSN(vlxei8_v, MATCH_VLXEI8_V, MASK_VLXEI8_V)
+DECLARE_INSN(vlxei16_v, MATCH_VLXEI16_V, MASK_VLXEI16_V)
+DECLARE_INSN(vlxei32_v, MATCH_VLXEI32_V, MASK_VLXEI32_V)
+DECLARE_INSN(vlxei64_v, MATCH_VLXEI64_V, MASK_VLXEI64_V)
+DECLARE_INSN(vlxei128_v, MATCH_VLXEI128_V, MASK_VLXEI128_V)
+DECLARE_INSN(vlxei256_v, MATCH_VLXEI256_V, MASK_VLXEI256_V)
+DECLARE_INSN(vlxei512_v, MATCH_VLXEI512_V, MASK_VLXEI512_V)
+DECLARE_INSN(vlxei1024_v, MATCH_VLXEI1024_V, MASK_VLXEI1024_V)
+DECLARE_INSN(vsxei8_v, MATCH_VSXEI8_V, MASK_VSXEI8_V)
+DECLARE_INSN(vsxei16_v, MATCH_VSXEI16_V, MASK_VSXEI16_V)
+DECLARE_INSN(vsxei32_v, MATCH_VSXEI32_V, MASK_VSXEI32_V)
+DECLARE_INSN(vsxei64_v, MATCH_VSXEI64_V, MASK_VSXEI64_V)
+DECLARE_INSN(vsxei128_v, MATCH_VSXEI128_V, MASK_VSXEI128_V)
+DECLARE_INSN(vsxei256_v, MATCH_VSXEI256_V, MASK_VSXEI256_V)
+DECLARE_INSN(vsxei512_v, MATCH_VSXEI512_V, MASK_VSXEI512_V)
+DECLARE_INSN(vsxei1024_v, MATCH_VSXEI1024_V, MASK_VSXEI1024_V)
+DECLARE_INSN(vsuxei8_v, MATCH_VSUXEI8_V, MASK_VSUXEI8_V)
+DECLARE_INSN(vsuxei16_v, MATCH_VSUXEI16_V, MASK_VSUXEI16_V)
+DECLARE_INSN(vsuxei32_v, MATCH_VSUXEI32_V, MASK_VSUXEI32_V)
+DECLARE_INSN(vsuxei64_v, MATCH_VSUXEI64_V, MASK_VSUXEI64_V)
+DECLARE_INSN(vsuxei128_v, MATCH_VSUXEI128_V, MASK_VSUXEI128_V)
+DECLARE_INSN(vsuxei256_v, MATCH_VSUXEI256_V, MASK_VSUXEI256_V)
+DECLARE_INSN(vsuxei512_v, MATCH_VSUXEI512_V, MASK_VSUXEI512_V)
+DECLARE_INSN(vsuxei1024_v, MATCH_VSUXEI1024_V, MASK_VSUXEI1024_V)
+DECLARE_INSN(vle8ff_v, MATCH_VLE8FF_V, MASK_VLE8FF_V)
+DECLARE_INSN(vle16ff_v, MATCH_VLE16FF_V, MASK_VLE16FF_V)
+DECLARE_INSN(vle32ff_v, MATCH_VLE32FF_V, MASK_VLE32FF_V)
+DECLARE_INSN(vle64ff_v, MATCH_VLE64FF_V, MASK_VLE64FF_V)
+DECLARE_INSN(vle128ff_v, MATCH_VLE128FF_V, MASK_VLE128FF_V)
+DECLARE_INSN(vle256ff_v, MATCH_VLE256FF_V, MASK_VLE256FF_V)
+DECLARE_INSN(vle512ff_v, MATCH_VLE512FF_V, MASK_VLE512FF_V)
+DECLARE_INSN(vle1024ff_v, MATCH_VLE1024FF_V, MASK_VLE1024FF_V)
DECLARE_INSN(vl1r_v, MATCH_VL1R_V, MASK_VL1R_V)
DECLARE_INSN(vs1r_v, MATCH_VS1R_V, MASK_VS1R_V)
DECLARE_INSN(vfadd_vf, MATCH_VFADD_VF, MASK_VFADD_VF)
diff --git a/riscv/insns/vlb_v.h b/riscv/insns/vlb_v.h
deleted file mode 100644
index f5aaa6e..0000000
--- a/riscv/insns/vlb_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vlb.v
-require(P.VU.vsew >= e8);
-VI_LD(0, i * nf + fn, int8, 1, false);
diff --git a/riscv/insns/vlbff_v.h b/riscv/insns/vlbff_v.h
deleted file mode 100644
index 06419dd..0000000
--- a/riscv/insns/vlbff_v.h
+++ /dev/null
@@ -1,2 +0,0 @@
-// vlbff.v
-VI_LDST_FF(int, 8, false);
diff --git a/riscv/insns/vlbu_v.h b/riscv/insns/vlbu_v.h
deleted file mode 100644
index 8f29679..0000000
--- a/riscv/insns/vlbu_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vlbu.v
-require(P.VU.vsew >= e8);
-VI_LD(0, i + fn, uint8, 1, false);
diff --git a/riscv/insns/vlbuff_v.h b/riscv/insns/vlbuff_v.h
deleted file mode 100644
index dbe3130..0000000
--- a/riscv/insns/vlbuff_v.h
+++ /dev/null
@@ -1,2 +0,0 @@
-// vlbuff.v
-VI_LDST_FF(uint, 8, false);
diff --git a/riscv/insns/vle16_v.h b/riscv/insns/vle16_v.h
new file mode 100644
index 0000000..7bd2e83
--- /dev/null
+++ b/riscv/insns/vle16_v.h
@@ -0,0 +1,2 @@
+// vle16.v and vlseg[2-8]e16.v
+VI_LD(0, (i * nf + fn), int16);
diff --git a/riscv/insns/vle16ff_v.h b/riscv/insns/vle16ff_v.h
new file mode 100644
index 0000000..53c8889
--- /dev/null
+++ b/riscv/insns/vle16ff_v.h
@@ -0,0 +1,2 @@
+// vle16ff.v and vlseg[2-8]e16ff.v
+VI_LDST_FF(int16);
diff --git a/riscv/insns/vle32_v.h b/riscv/insns/vle32_v.h
new file mode 100644
index 0000000..9399fd6
--- /dev/null
+++ b/riscv/insns/vle32_v.h
@@ -0,0 +1,2 @@
+// vle32.v and vlseg[2-8]e32.v
+VI_LD(0, (i * nf + fn), int32);
diff --git a/riscv/insns/vle32ff_v.h b/riscv/insns/vle32ff_v.h
new file mode 100644
index 0000000..7d03d7d
--- /dev/null
+++ b/riscv/insns/vle32ff_v.h
@@ -0,0 +1,2 @@
+// vle32ff.v and vlseg[2-8]e32ff.v
+VI_LDST_FF(int32);
diff --git a/riscv/insns/vle64_v.h b/riscv/insns/vle64_v.h
new file mode 100644
index 0000000..3f2654d
--- /dev/null
+++ b/riscv/insns/vle64_v.h
@@ -0,0 +1,2 @@
+// vle64.v and vlseg[2-8]e64.v
+VI_LD(0, (i * nf + fn), int64);
diff --git a/riscv/insns/vle64ff_v.h b/riscv/insns/vle64ff_v.h
new file mode 100644
index 0000000..39996da
--- /dev/null
+++ b/riscv/insns/vle64ff_v.h
@@ -0,0 +1,2 @@
+// vle64ff.v and vlseg[2-8]e64ff.v
+VI_LDST_FF(int64);
diff --git a/riscv/insns/vle8_v.h b/riscv/insns/vle8_v.h
new file mode 100644
index 0000000..5613a1d
--- /dev/null
+++ b/riscv/insns/vle8_v.h
@@ -0,0 +1,2 @@
+// vle8.v and vlseg[2-8]e8.v
+VI_LD(0, (i * nf + fn), int8);
diff --git a/riscv/insns/vle8ff_v.h b/riscv/insns/vle8ff_v.h
new file mode 100644
index 0000000..b56d1d3
--- /dev/null
+++ b/riscv/insns/vle8ff_v.h
@@ -0,0 +1,2 @@
+// vle8ff.v and vlseg[2-8]e8ff.v
+VI_LDST_FF(int8);
diff --git a/riscv/insns/vle_v.h b/riscv/insns/vle_v.h
deleted file mode 100644
index b733a3c..0000000
--- a/riscv/insns/vle_v.h
+++ /dev/null
@@ -1,13 +0,0 @@
-// vle.v and vlseg[2-8]e.v
-reg_t sew = P.VU.vsew;
-
-if (sew == e8) {
- VI_LD(0, (i * nf + fn), int8, 1, true);
-} else if (sew == e16) {
- VI_LD(0, (i * nf + fn), int16, 2, true);
-} else if (sew == e32) {
- VI_LD(0, (i * nf + fn), int32, 4, true);
-} else if (sew == e64) {
- VI_LD(0, (i * nf + fn), int64, 8, true);
-}
-
diff --git a/riscv/insns/vleff_v.h b/riscv/insns/vleff_v.h
deleted file mode 100644
index 09d0e58..0000000
--- a/riscv/insns/vleff_v.h
+++ /dev/null
@@ -1,12 +0,0 @@
-// vleff.v and vleseg[2-8]ff.v
-reg_t sew = P.VU.vsew;
-
-if (sew == e8) {
- VI_LDST_FF(int, 8, true);
-} else if (sew == e16) {
- VI_LDST_FF(int, 16, true);
-} else if (sew == e32) {
- VI_LDST_FF(int, 32, true);
-} else if (sew == e64) {
- VI_LDST_FF(int, 64, true);
-}
diff --git a/riscv/insns/vlh_v.h b/riscv/insns/vlh_v.h
deleted file mode 100644
index 3bdfd6e..0000000
--- a/riscv/insns/vlh_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vlh.v
-require(P.VU.vsew >= e16);
-VI_LD(0, i * nf + fn, int16, 2, false);
diff --git a/riscv/insns/vlhff_v.h b/riscv/insns/vlhff_v.h
deleted file mode 100644
index c003301..0000000
--- a/riscv/insns/vlhff_v.h
+++ /dev/null
@@ -1,2 +0,0 @@
-// vlh.v
-VI_LDST_FF(int, 16, false);
diff --git a/riscv/insns/vlhu_v.h b/riscv/insns/vlhu_v.h
deleted file mode 100644
index 98f3280..0000000
--- a/riscv/insns/vlhu_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vlhu.v
-require(P.VU.vsew >= e16);
-VI_LD(0, i * nf + fn, uint16, 2, false);
diff --git a/riscv/insns/vlhuff_v.h b/riscv/insns/vlhuff_v.h
deleted file mode 100644
index bed60ae..0000000
--- a/riscv/insns/vlhuff_v.h
+++ /dev/null
@@ -1,2 +0,0 @@
-// vlhuff.v
-VI_LDST_FF(uint, 16, false);
diff --git a/riscv/insns/vlsb_v.h b/riscv/insns/vlsb_v.h
deleted file mode 100644
index 1da0fc6..0000000
--- a/riscv/insns/vlsb_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vlsb.v
-require(P.VU.vsew >= e8);
-VI_LD(i * RS2, fn, int8, 1, false);
diff --git a/riscv/insns/vlsbu_v.h b/riscv/insns/vlsbu_v.h
deleted file mode 100644
index d92eb19..0000000
--- a/riscv/insns/vlsbu_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vlsb.v
-require(P.VU.vsew >= e8);
-VI_LD(i * RS2, fn, uint8, 1, false);
diff --git a/riscv/insns/vlse16_v.h b/riscv/insns/vlse16_v.h
new file mode 100644
index 0000000..7622ded
--- /dev/null
+++ b/riscv/insns/vlse16_v.h
@@ -0,0 +1,2 @@
+// vlse16.v and vlsseg[2-8]e16.v
+VI_LD(i * RS2, fn, int16);
diff --git a/riscv/insns/vlse32_v.h b/riscv/insns/vlse32_v.h
new file mode 100644
index 0000000..1afc5e9
--- /dev/null
+++ b/riscv/insns/vlse32_v.h
@@ -0,0 +1,2 @@
+// vlse32.v and vlsseg[2-8]e32.v
+VI_LD(i * RS2, fn, int32);
diff --git a/riscv/insns/vlse64_v.h b/riscv/insns/vlse64_v.h
new file mode 100644
index 0000000..c6d9999
--- /dev/null
+++ b/riscv/insns/vlse64_v.h
@@ -0,0 +1,2 @@
+// vlse64.v and vlsseg[2-8]e64.v
+VI_LD(i * RS2, fn, int64);
diff --git a/riscv/insns/vlse8_v.h b/riscv/insns/vlse8_v.h
new file mode 100644
index 0000000..021a1fb
--- /dev/null
+++ b/riscv/insns/vlse8_v.h
@@ -0,0 +1,2 @@
+// vlse8.v and vlsseg[2-8]e8.v
+VI_LD(i * RS2, fn, int8);
diff --git a/riscv/insns/vlse_v.h b/riscv/insns/vlse_v.h
deleted file mode 100644
index ff33dc9..0000000
--- a/riscv/insns/vlse_v.h
+++ /dev/null
@@ -1,13 +0,0 @@
-// vlse.v and vlsseg[2-8]e.v
-reg_t sew = P.VU.vsew;
-
-if (sew == e8) {
- VI_LD(i * RS2, fn, int8, 1, true);
-} else if (sew == e16) {
- VI_LD(i * RS2, fn, int16, 2, true);
-} else if (sew == e32) {
- VI_LD(i * RS2, fn, int32, 4, true);
-} else if (sew == e64) {
- VI_LD(i * RS2, fn, int64, 8, true);
-}
-
diff --git a/riscv/insns/vlsh_v.h b/riscv/insns/vlsh_v.h
deleted file mode 100644
index b9ab586..0000000
--- a/riscv/insns/vlsh_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vlsh.v
-require(P.VU.vsew >= e16);
-VI_LD(i * RS2, fn, int16, 2, false);
diff --git a/riscv/insns/vlshu_v.h b/riscv/insns/vlshu_v.h
deleted file mode 100644
index 9e61e53..0000000
--- a/riscv/insns/vlshu_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vlsh.v
-require(P.VU.vsew >= e16);
-VI_LD(i * RS2, fn, uint16, 2, false);
diff --git a/riscv/insns/vlsw_v.h b/riscv/insns/vlsw_v.h
deleted file mode 100644
index 14dba97..0000000
--- a/riscv/insns/vlsw_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vlsw.v
-require(P.VU.vsew >= e32);
-VI_LD(i * RS2, fn, int32, 4, false);
diff --git a/riscv/insns/vlswu_v.h b/riscv/insns/vlswu_v.h
deleted file mode 100644
index f509076..0000000
--- a/riscv/insns/vlswu_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vlsw.v
-require(P.VU.vsew >= e32);
-VI_LD(i * RS2, fn, uint32, 4, false);
diff --git a/riscv/insns/vlw_v.h b/riscv/insns/vlw_v.h
deleted file mode 100644
index 8970cd4..0000000
--- a/riscv/insns/vlw_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vlw.v
-require(P.VU.vsew >= e32);
-VI_LD(0, i * nf + fn, int32, 4, false);
diff --git a/riscv/insns/vlwff_v.h b/riscv/insns/vlwff_v.h
deleted file mode 100644
index ca224b6..0000000
--- a/riscv/insns/vlwff_v.h
+++ /dev/null
@@ -1,2 +0,0 @@
-// vlwff.v
-VI_LDST_FF(int, 32, false);
diff --git a/riscv/insns/vlwu_v.h b/riscv/insns/vlwu_v.h
deleted file mode 100644
index c1c77ff..0000000
--- a/riscv/insns/vlwu_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vlwu.v
-require(P.VU.vsew >= e32);
-VI_LD(0, i * nf + fn, uint32, 4, false);
diff --git a/riscv/insns/vlwuff_v.h b/riscv/insns/vlwuff_v.h
deleted file mode 100644
index eb2cba3..0000000
--- a/riscv/insns/vlwuff_v.h
+++ /dev/null
@@ -1,2 +0,0 @@
-// vlwuff.v
-VI_LDST_FF(uint, 32, false);
diff --git a/riscv/insns/vlxb_v.h b/riscv/insns/vlxb_v.h
deleted file mode 100644
index 449d918..0000000
--- a/riscv/insns/vlxb_v.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// vlxb.v
-require(P.VU.vsew >= e8);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-VI_LD_INDEX(index[i], fn, int8, 1, false);
diff --git a/riscv/insns/vlxbu_v.h b/riscv/insns/vlxbu_v.h
deleted file mode 100644
index 72735d6..0000000
--- a/riscv/insns/vlxbu_v.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// vlxbu.v
-require(P.VU.vsew >= e8);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-VI_LD_INDEX(index[i], fn, uint8, 1, false);
diff --git a/riscv/insns/vlxe_v.h b/riscv/insns/vlxe_v.h
deleted file mode 100644
index 889a19e..0000000
--- a/riscv/insns/vlxe_v.h
+++ /dev/null
@@ -1,12 +0,0 @@
-// vlxe.v and vlxseg[2-8]e.v
-reg_t sew = P.VU.vsew;
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-if (sew == e8) {
- VI_LD_INDEX(index[i], fn, int8, 1, true);
-} else if (sew == e16) {
- VI_LD_INDEX(index[i], fn, int16, 2, true);
-} else if (sew == e32) {
- VI_LD_INDEX(index[i], fn, int32, 4, true);
-} else if (sew == e64) {
- VI_LD_INDEX(index[i], fn, int64, 8, true);
-}
diff --git a/riscv/insns/vlxei16_v.h b/riscv/insns/vlxei16_v.h
new file mode 100644
index 0000000..0905b58
--- /dev/null
+++ b/riscv/insns/vlxei16_v.h
@@ -0,0 +1,24 @@
+// vlxei16.v and vlxseg[2-8]e16.v
+VI_DUPLICATE_VREG(insn.rs2(), e16);
+
+switch(P.VU.vsew) {
+ case e8: {
+ VI_LD_INDEX(index[i], fn, int8, true);
+ }
+ break;
+ case e16: {
+ VI_LD_INDEX(index[i], fn, int16, true);
+ }
+ break;
+ case e32: {
+ VI_LD_INDEX(index[i], fn, int32, true);
+ }
+ break;
+ case e64: {
+ VI_LD_INDEX(index[i], fn, int64, true);
+ }
+ break;
+ default:
+ require(0);
+ break;
+};
diff --git a/riscv/insns/vlxei32_v.h b/riscv/insns/vlxei32_v.h
new file mode 100644
index 0000000..a096cd3
--- /dev/null
+++ b/riscv/insns/vlxei32_v.h
@@ -0,0 +1,24 @@
+// vlxei32.v and vlxseg[2-8]ei32.v
+VI_DUPLICATE_VREG(insn.rs2(), e32);
+
+switch(P.VU.vsew) {
+ case e8: {
+ VI_LD_INDEX(index[i], fn, int8, true);
+ }
+ break;
+ case e16: {
+ VI_LD_INDEX(index[i], fn, int16, true);
+ }
+ break;
+ case e32: {
+ VI_LD_INDEX(index[i], fn, int32, true);
+ }
+ break;
+ case e64: {
+ VI_LD_INDEX(index[i], fn, int64, true);
+ }
+ break;
+ default:
+ require(0);
+ break;
+};
diff --git a/riscv/insns/vlxei64_v.h b/riscv/insns/vlxei64_v.h
new file mode 100644
index 0000000..0d04405
--- /dev/null
+++ b/riscv/insns/vlxei64_v.h
@@ -0,0 +1,24 @@
+// vlxei64.v and vlxseg[2-8]ei64.v
+VI_DUPLICATE_VREG(insn.rs2(), e64);
+
+switch(P.VU.vsew) {
+ case e8: {
+ VI_LD_INDEX(index[i], fn, int8, true);
+ }
+ break;
+ case e16: {
+ VI_LD_INDEX(index[i], fn, int16, true);
+ }
+ break;
+ case e32: {
+ VI_LD_INDEX(index[i], fn, int32, true);
+ }
+ break;
+ case e64: {
+ VI_LD_INDEX(index[i], fn, int64, true);
+ }
+ break;
+ default:
+ require(0);
+ break;
+};
diff --git a/riscv/insns/vlxei8_v.h b/riscv/insns/vlxei8_v.h
new file mode 100644
index 0000000..2db5047
--- /dev/null
+++ b/riscv/insns/vlxei8_v.h
@@ -0,0 +1,24 @@
+// vlxei8.v and vlxseg[2-8]ei8.v
+VI_DUPLICATE_VREG(insn.rs2(), e8);
+
+switch(P.VU.vsew) {
+ case e8: {
+ VI_LD_INDEX(index[i], fn, int8, true);
+ }
+ break;
+ case e16: {
+ VI_LD_INDEX(index[i], fn, int16, true);
+ }
+ break;
+ case e32: {
+ VI_LD_INDEX(index[i], fn, int32, true);
+ }
+ break;
+ case e64: {
+ VI_LD_INDEX(index[i], fn, int64, true);
+ }
+ break;
+ default:
+ require(0);
+ break;
+};
diff --git a/riscv/insns/vlxh_v.h b/riscv/insns/vlxh_v.h
deleted file mode 100644
index fd3a1fb..0000000
--- a/riscv/insns/vlxh_v.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// vlxh.v
-require(P.VU.vsew >= e16);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-VI_LD_INDEX(index[i], fn, int16, 2, false);
diff --git a/riscv/insns/vlxhu_v.h b/riscv/insns/vlxhu_v.h
deleted file mode 100644
index 25cb7ab..0000000
--- a/riscv/insns/vlxhu_v.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// vlxhu.v
-require(P.VU.vsew >= e16);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-VI_LD_INDEX(index[i], fn, uint16, 2, false);
diff --git a/riscv/insns/vlxw_v.h b/riscv/insns/vlxw_v.h
deleted file mode 100644
index 8bc613b..0000000
--- a/riscv/insns/vlxw_v.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// vlxw.v
-require(P.VU.vsew >= e32);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-VI_LD_INDEX(index[i], fn, int32, 4, false);
diff --git a/riscv/insns/vlxwu_v.h b/riscv/insns/vlxwu_v.h
deleted file mode 100644
index bccec95..0000000
--- a/riscv/insns/vlxwu_v.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// vlxwu.v
-require(P.VU.vsew >= e32);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-VI_LD_INDEX(index[i], fn, uint32, 4, false);
diff --git a/riscv/insns/vsb_v.h b/riscv/insns/vsb_v.h
deleted file mode 100644
index c5830cc..0000000
--- a/riscv/insns/vsb_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vsb.v
-require(P.VU.vsew >= e8);
-VI_ST(0, i * nf + fn, uint8, 1, false);
diff --git a/riscv/insns/vse16_v.h b/riscv/insns/vse16_v.h
new file mode 100644
index 0000000..20b04c8
--- /dev/null
+++ b/riscv/insns/vse16_v.h
@@ -0,0 +1,2 @@
+// vse16.v and vsseg[2-8]e16.v
+VI_ST(0, (i * nf + fn), uint16);
diff --git a/riscv/insns/vse32_v.h b/riscv/insns/vse32_v.h
new file mode 100644
index 0000000..efd2973
--- /dev/null
+++ b/riscv/insns/vse32_v.h
@@ -0,0 +1,2 @@
+// vse32.v and vsseg[2-8]e32.v
+VI_ST(0, (i * nf + fn), uint32);
diff --git a/riscv/insns/vse64_v.h b/riscv/insns/vse64_v.h
new file mode 100644
index 0000000..9b36c8d
--- /dev/null
+++ b/riscv/insns/vse64_v.h
@@ -0,0 +1,2 @@
+// vse64.v and vsseg[2-8]e64.v
+VI_ST(0, (i * nf + fn), uint64);
diff --git a/riscv/insns/vse8_v.h b/riscv/insns/vse8_v.h
new file mode 100644
index 0000000..32dee14
--- /dev/null
+++ b/riscv/insns/vse8_v.h
@@ -0,0 +1,2 @@
+// vse8.v and vsseg[2-8]e8.v
+VI_ST(0, (i * nf + fn), uint8);
diff --git a/riscv/insns/vsh_v.h b/riscv/insns/vsh_v.h
deleted file mode 100644
index 559c93e..0000000
--- a/riscv/insns/vsh_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vsh.v
-require(P.VU.vsew >= e16);
-VI_ST(0, i * nf + fn, uint16, 2, false);
diff --git a/riscv/insns/vssb_v.h b/riscv/insns/vssb_v.h
deleted file mode 100644
index 337b7c0..0000000
--- a/riscv/insns/vssb_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vssb.v
-require(P.VU.vsew >= e8);
-VI_ST(i * RS2, fn, uint8, 1, false);
diff --git a/riscv/insns/vsse16_v.h b/riscv/insns/vsse16_v.h
new file mode 100644
index 0000000..adbbcf5
--- /dev/null
+++ b/riscv/insns/vsse16_v.h
@@ -0,0 +1,2 @@
+// vsse16.v and vssseg[2-8]e16.v
+VI_ST(i * RS2, fn, uint16);
diff --git a/riscv/insns/vsse32_v.h b/riscv/insns/vsse32_v.h
new file mode 100644
index 0000000..73bd272
--- /dev/null
+++ b/riscv/insns/vsse32_v.h
@@ -0,0 +1,2 @@
+// vsse32.v and vssseg[2-8]e32.v
+VI_ST(i * RS2, fn, uint32);
diff --git a/riscv/insns/vsse64_v.h b/riscv/insns/vsse64_v.h
new file mode 100644
index 0000000..1785a56
--- /dev/null
+++ b/riscv/insns/vsse64_v.h
@@ -0,0 +1,2 @@
+// vsse64.v and vssseg[2-8]e64.v
+VI_ST(i * RS2, fn, uint64);
diff --git a/riscv/insns/vsse8_v.h b/riscv/insns/vsse8_v.h
new file mode 100644
index 0000000..c5daf0b
--- /dev/null
+++ b/riscv/insns/vsse8_v.h
@@ -0,0 +1,2 @@
+// vsse8.v and vssseg[2-8]e8.v
+VI_ST(i * RS2, fn, uint8);
diff --git a/riscv/insns/vsse_v.h b/riscv/insns/vsse_v.h
deleted file mode 100644
index a682e2b..0000000
--- a/riscv/insns/vsse_v.h
+++ /dev/null
@@ -1,13 +0,0 @@
-// vsse.v and vssseg[2-8]e.v
-reg_t sew = P.VU.vsew;
-
-if (sew == e8) {
- VI_ST(i * RS2, fn, uint8, 1, true);
-} else if (sew == e16) {
- VI_ST(i * RS2, fn, uint16, 2, true);
-} else if (sew == e32) {
- VI_ST(i * RS2, fn, uint32, 4, true);
-} else if (sew == e64) {
- VI_ST(i * RS2, fn, uint64, 8, true);
-}
-
diff --git a/riscv/insns/vssh_v.h b/riscv/insns/vssh_v.h
deleted file mode 100644
index fcdcbda..0000000
--- a/riscv/insns/vssh_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vssh.v
-require(P.VU.vsew >= e16);
-VI_ST(i * RS2, fn, uint16, 2, false);
diff --git a/riscv/insns/vssw_v.h b/riscv/insns/vssw_v.h
deleted file mode 100644
index ceae158..0000000
--- a/riscv/insns/vssw_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vssw.v
-require(P.VU.vsew >= e32);
-VI_ST(i * RS2, fn, uint32, 4, false);
diff --git a/riscv/insns/vsuxb_v.h b/riscv/insns/vsuxb_v.h
deleted file mode 100644
index 779d37d..0000000
--- a/riscv/insns/vsuxb_v.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// vsuxb.v
-require(P.VU.vsew >= e8);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-VI_ST_INDEX(index[i], fn, uint8, 1, false);
diff --git a/riscv/insns/vsuxe_v.h b/riscv/insns/vsuxe_v.h
deleted file mode 100644
index 1434876..0000000
--- a/riscv/insns/vsuxe_v.h
+++ /dev/null
@@ -1,13 +0,0 @@
-// vsuxe.v
-reg_t sew = P.VU.vsew;
-require(sew >= e8 && sew <= e64);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-if (sew == e8) {
- VI_ST_INDEX(index[i], fn, uint8, 1, false);
-} else if (sew == e16) {
- VI_ST_INDEX(index[i], fn, uint16, 2, false);
-} else if (sew == e32) {
- VI_ST_INDEX(index[i], fn, uint32, 4, false);
-} else if (sew == e64) {
- VI_ST_INDEX(index[i], fn, uint64, 8, false);
-}
diff --git a/riscv/insns/vsuxei16_v.h b/riscv/insns/vsuxei16_v.h
new file mode 100644
index 0000000..98435f6
--- /dev/null
+++ b/riscv/insns/vsuxei16_v.h
@@ -0,0 +1,24 @@
+// vsuxei16.v
+VI_DUPLICATE_VREG(insn.rs2(), 16);
+
+switch(P.VU.vsew) {
+ case e8: {
+ VI_ST_INDEX(index[i], fn, uint8, false);
+ }
+ break;
+ case e16: {
+ VI_ST_INDEX(index[i], fn, uint16, false);
+ }
+ break;
+ case e32: {
+ VI_ST_INDEX(index[i], fn, uint32, false);
+ }
+ break;
+ case e64: {
+ VI_ST_INDEX(index[i], fn, uint64, false);
+ }
+ break;
+ default:
+ require(0);
+ break;
+};
diff --git a/riscv/insns/vsuxei32_v.h b/riscv/insns/vsuxei32_v.h
new file mode 100644
index 0000000..cdfb592
--- /dev/null
+++ b/riscv/insns/vsuxei32_v.h
@@ -0,0 +1,24 @@
+// vsuxei32.v
+VI_DUPLICATE_VREG(insn.rs2(), 32);
+
+switch(P.VU.vsew) {
+ case e8: {
+ VI_ST_INDEX(index[i], fn, uint8, false);
+ }
+ break;
+ case e16: {
+ VI_ST_INDEX(index[i], fn, uint16, false);
+ }
+ break;
+ case e32: {
+ VI_ST_INDEX(index[i], fn, uint32, false);
+ }
+ break;
+ case e64: {
+ VI_ST_INDEX(index[i], fn, uint64, false);
+ }
+ break;
+ default:
+ require(0);
+ break;
+};
diff --git a/riscv/insns/vsuxei64_v.h b/riscv/insns/vsuxei64_v.h
new file mode 100644
index 0000000..ed7cb50
--- /dev/null
+++ b/riscv/insns/vsuxei64_v.h
@@ -0,0 +1,24 @@
+// vsuxei64.v
+VI_DUPLICATE_VREG(insn.rs2(), 64);
+
+switch(P.VU.vsew) {
+ case e8: {
+ VI_ST_INDEX(index[i], fn, uint8, false);
+ }
+ break;
+ case e16: {
+ VI_ST_INDEX(index[i], fn, uint16, false);
+ }
+ break;
+ case e32: {
+ VI_ST_INDEX(index[i], fn, uint32, false);
+ }
+ break;
+ case e64: {
+ VI_ST_INDEX(index[i], fn, uint64, false);
+ }
+ break;
+ default:
+ require(0);
+ break;
+};
diff --git a/riscv/insns/vsuxei8_v.h b/riscv/insns/vsuxei8_v.h
new file mode 100644
index 0000000..ff7bde0
--- /dev/null
+++ b/riscv/insns/vsuxei8_v.h
@@ -0,0 +1,24 @@
+// vsuxei8.v
+VI_DUPLICATE_VREG(insn.rs2(), 8);
+
+switch(P.VU.vsew) {
+ case e8: {
+ VI_ST_INDEX(index[i], fn, uint8, false);
+ }
+ break;
+ case e16: {
+ VI_ST_INDEX(index[i], fn, uint16, false);
+ }
+ break;
+ case e32: {
+ VI_ST_INDEX(index[i], fn, uint32, false);
+ }
+ break;
+ case e64: {
+ VI_ST_INDEX(index[i], fn, uint64, false);
+ }
+ break;
+ default:
+ require(0);
+ break;
+};
diff --git a/riscv/insns/vsuxh_v.h b/riscv/insns/vsuxh_v.h
deleted file mode 100644
index ac89fd9..0000000
--- a/riscv/insns/vsuxh_v.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// vsuxh.v
-require(P.VU.vsew >= e16);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-VI_ST_INDEX(index[i], fn, uint16, 2, false);
diff --git a/riscv/insns/vsuxw_v.h b/riscv/insns/vsuxw_v.h
deleted file mode 100644
index 4787c29..0000000
--- a/riscv/insns/vsuxw_v.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// vsuxw.v
-require(P.VU.vsew >= e32);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-VI_ST_INDEX(index[i], fn, uint32, 4, false);
diff --git a/riscv/insns/vsw_v.h b/riscv/insns/vsw_v.h
deleted file mode 100644
index 62b816a..0000000
--- a/riscv/insns/vsw_v.h
+++ /dev/null
@@ -1,3 +0,0 @@
-// vsw.v
-require(P.VU.vsew >= e32);
-VI_ST(0, i * nf + fn, uint32, 4, false);
diff --git a/riscv/insns/vsxb_v.h b/riscv/insns/vsxb_v.h
deleted file mode 100644
index aa8ca71..0000000
--- a/riscv/insns/vsxb_v.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// vsxb.v
-require(P.VU.vsew >= e8);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-VI_ST_INDEX(index[i], fn, uint8, 1, false);
diff --git a/riscv/insns/vsxe_v.h b/riscv/insns/vsxe_v.h
deleted file mode 100644
index d4b2457..0000000
--- a/riscv/insns/vsxe_v.h
+++ /dev/null
@@ -1,14 +0,0 @@
-// vsxe.v and vsxseg[2-8]e.v
-reg_t sew = P.VU.vsew;
-require(sew >= e8 && sew <= e64);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-if (sew == e8) {
- VI_ST_INDEX(index[i], fn, uint8, 1, true);
-} else if (sew == e16) {
- VI_ST_INDEX(index[i], fn, uint16, 2, true);
-} else if (sew == e32) {
- VI_ST_INDEX(index[i], fn, uint32, 4, true);
-} else if (sew == e64) {
- VI_ST_INDEX(index[i], fn, uint64, 8, true);
-}
-
diff --git a/riscv/insns/vsxei16_v.h b/riscv/insns/vsxei16_v.h
new file mode 100644
index 0000000..72bb8e9
--- /dev/null
+++ b/riscv/insns/vsxei16_v.h
@@ -0,0 +1,24 @@
+// vsxei16.v and vsxseg[2-8]ei16.v
+VI_DUPLICATE_VREG(insn.rs2(), 16);
+
+switch(P.VU.vsew) {
+ case e8: {
+ VI_ST_INDEX(index[i], fn, uint8, true);
+ }
+ break;
+ case e16: {
+ VI_ST_INDEX(index[i], fn, uint16, true);
+ }
+ break;
+ case e32: {
+ VI_ST_INDEX(index[i], fn, uint32, true);
+ }
+ break;
+ case e64: {
+ VI_ST_INDEX(index[i], fn, uint64, true);
+ }
+ break;
+ default:
+ require(0);
+ break;
+};
diff --git a/riscv/insns/vsxei32_v.h b/riscv/insns/vsxei32_v.h
new file mode 100644
index 0000000..296c8bb
--- /dev/null
+++ b/riscv/insns/vsxei32_v.h
@@ -0,0 +1,24 @@
+// vsxei32.v and vsxseg[2-8]ei32.v
+VI_DUPLICATE_VREG(insn.rs2(), 32);
+
+switch(P.VU.vsew) {
+ case e8: {
+ VI_ST_INDEX(index[i], fn, uint8, true);
+ }
+ break;
+ case e16: {
+ VI_ST_INDEX(index[i], fn, uint16, true);
+ }
+ break;
+ case e32: {
+ VI_ST_INDEX(index[i], fn, uint32, true);
+ }
+ break;
+ case e64: {
+ VI_ST_INDEX(index[i], fn, uint64, true);
+ }
+ break;
+ default:
+ require(0);
+ break;
+};
diff --git a/riscv/insns/vsxei64_v.h b/riscv/insns/vsxei64_v.h
new file mode 100644
index 0000000..b5d286f
--- /dev/null
+++ b/riscv/insns/vsxei64_v.h
@@ -0,0 +1,24 @@
+// vsxei64.v and vsxseg[2-8]ei64.v
+VI_DUPLICATE_VREG(insn.rs2(), 64);
+
+switch(P.VU.vsew) {
+ case e8: {
+ VI_ST_INDEX(index[i], fn, uint8, true);
+ }
+ break;
+ case e16: {
+ VI_ST_INDEX(index[i], fn, uint16, true);
+ }
+ break;
+ case e32: {
+ VI_ST_INDEX(index[i], fn, uint32, true);
+ }
+ break;
+ case e64: {
+ VI_ST_INDEX(index[i], fn, uint64, true);
+ }
+ break;
+ default:
+ require(0);
+ break;
+};
diff --git a/riscv/insns/vsxei8_v.h b/riscv/insns/vsxei8_v.h
new file mode 100644
index 0000000..355223c
--- /dev/null
+++ b/riscv/insns/vsxei8_v.h
@@ -0,0 +1,24 @@
+// vsxei8.v and vsxseg[2-8]ei8.v
+VI_DUPLICATE_VREG(insn.rs2(), 8);
+
+switch(P.VU.vsew) {
+ case e8: {
+ VI_ST_INDEX(index[i], fn, uint8, true);
+ }
+ break;
+ case e16: {
+ VI_ST_INDEX(index[i], fn, uint16, true);
+ }
+ break;
+ case e32: {
+ VI_ST_INDEX(index[i], fn, uint32, true);
+ }
+ break;
+ case e64: {
+ VI_ST_INDEX(index[i], fn, uint64, true);
+ }
+ break;
+ default:
+ require(0);
+ break;
+};
diff --git a/riscv/insns/vsxh_v.h b/riscv/insns/vsxh_v.h
deleted file mode 100644
index 5f292f8..0000000
--- a/riscv/insns/vsxh_v.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// vsxh.v
-require(P.VU.vsew >= e16);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-VI_ST_INDEX(index[i], fn, uint16, 2, false);
diff --git a/riscv/insns/vsxw_v.h b/riscv/insns/vsxw_v.h
deleted file mode 100644
index 029347f..0000000
--- a/riscv/insns/vsxw_v.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// vsxw.v
-require(P.VU.vsew >= e32);
-VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
-VI_ST_INDEX(index[i], fn, uint32, 4, false);
diff --git a/riscv/riscv.mk.in b/riscv/riscv.mk.in
index 7a9b1d5..e597427 100644
--- a/riscv/riscv.mk.in
+++ b/riscv/riscv.mk.in
@@ -646,52 +646,40 @@ riscv_insn_ext_v_alu_fp = \
vmfne_vv \
riscv_insn_ext_v_ldst = \
+ vle8_v \
+ vle16_v \
+ vle32_v \
+ vle64_v \
+ vlse8_v \
+ vlse16_v \
+ vlse32_v \
+ vlse64_v \
+ vlxei8_v \
+ vlxei16_v \
+ vlxei32_v \
+ vlxei64_v \
+ vle8ff_v \
+ vle16ff_v \
+ vle32ff_v \
+ vle64ff_v \
+ vse8_v \
+ vse16_v \
+ vse32_v \
+ vse64_v \
+ vsse8_v \
+ vsse16_v \
+ vsse32_v \
+ vsse64_v \
+ vsxei8_v \
+ vsxei16_v \
+ vsxei32_v \
+ vsxei64_v \
+ vsuxei8_v \
+ vsuxei16_v \
+ vsuxei32_v \
+ vsuxei64_v \
vl1r_v \
- vlb_v \
- vlh_v \
- vlw_v \
- vle_v \
- vlbu_v \
- vlhu_v \
- vlwu_v \
- vlsb_v \
- vlsh_v \
- vlsw_v \
- vlse_v \
- vlxb_v \
- vlxh_v \
- vlxw_v \
- vlxe_v \
- vlsbu_v \
- vlshu_v \
- vlswu_v \
- vlxbu_v \
- vlxhu_v \
- vlxwu_v \
- vlbff_v \
- vlhff_v \
- vlwff_v \
- vleff_v \
- vlbuff_v \
- vlhuff_v \
- vlwuff_v \
vs1r_v \
- vsb_v \
- vsh_v \
- vsw_v \
- vse_v \
- vssb_v \
- vssh_v \
- vssw_v \
- vsse_v \
- vsxb_v \
- vsxh_v \
- vsxw_v \
- vsxe_v \
- vsuxb_v \
- vsuxh_v \
- vsuxw_v \
- vsuxe_v \
riscv_insn_ext_v_ctrl = \
vsetvli \
diff --git a/spike_main/disasm.cc b/spike_main/disasm.cc
index 8056ca9..60a31e6 100644
--- a/spike_main/disasm.cc
+++ b/spike_main/disasm.cc
@@ -326,6 +326,12 @@ struct : public arg_t {
}
} v_vtype;
+typedef struct {
+ reg_t match;
+ reg_t mask;
+ const char *fmt;
+ std::vector<const arg_t*>& arg;
+} ldst_seg_t;
std::string disassembler_t::disassemble(insn_t insn) const
{
@@ -345,6 +351,7 @@ disassembler_t::disassembler_t(int xlen)
const uint32_t mask_rvc_rs2 = 0x1fUL << 2;
const uint32_t mask_rvc_imm = mask_rvc_rs2 | 0x1000UL;
const uint32_t mask_nf = 0x7Ul << 29;
+ const uint32_t mask_vldst = 0x7Ul << 12 | 0x1UL << 28;
#define DECLARE_INSN(code, match, mask) \
const uint32_t match_##code = match; \
@@ -699,78 +706,64 @@ disassembler_t::disassembler_t(int xlen)
DISASM_INSN("vsetvli", vsetvli, 0, {&xrd, &xrs1, &v_vtype});
DISASM_INSN("vsetvl", vsetvl, 0, {&xrd, &xrs1, &xrs2});
- #define DISASM_VMEM_LD_INSN(name, ff, fmt) \
- add_insn(new disasm_insn_t("vl" #name "b" #ff ".v", match_vl##name##b##ff##_v, mask_vl##name##b##ff##_v | mask_nf, fmt)); \
- add_insn(new disasm_insn_t("vl" #name "h" #ff ".v", match_vl##name##h##ff##_v, mask_vl##name##h##ff##_v | mask_nf, fmt)); \
- add_insn(new disasm_insn_t("vl" #name "w" #ff ".v", match_vl##name##w##ff##_v, mask_vl##name##w##ff##_v | mask_nf, fmt)); \
- add_insn(new disasm_insn_t("vl" #name "e" #ff ".v", match_vl##name##e##ff##_v, mask_vl##name##e##ff##_v | mask_nf, fmt)); \
- add_insn(new disasm_insn_t("vl" #name "bu" #ff ".v", match_vl##name##bu##ff##_v, mask_vl##name##bu##ff##_v | mask_nf, fmt)); \
- add_insn(new disasm_insn_t("vl" #name "hu" #ff ".v", match_vl##name##hu##ff##_v, mask_vl##name##hu##ff##_v | mask_nf, fmt)); \
- add_insn(new disasm_insn_t("vl" #name "wu" #ff ".v", match_vl##name##wu##ff##_v, mask_vl##name##wu##ff##_v | mask_nf, fmt));
-
- #define DISASM_VMEM_ST_INSN(name, fmt) \
- add_insn(new disasm_insn_t("vs" #name "b.v", match_vs##name##b_v, mask_vs##name##b_v | mask_nf, fmt)); \
- add_insn(new disasm_insn_t("vs" #name "h.v", match_vs##name##h_v, mask_vs##name##h_v | mask_nf, fmt)); \
- add_insn(new disasm_insn_t("vs" #name "w.v", match_vs##name##w_v, mask_vs##name##w_v | mask_nf, fmt)); \
- add_insn(new disasm_insn_t("vs" #name "e.v", match_vs##name##e_v, mask_vs##name##e_v | mask_nf, fmt));
-
- const std::vector<const arg_t *> v_ld_unit = {&vd, &v_address, &opt, &vm};
- const std::vector<const arg_t *> v_st_unit = {&vs3, &v_address, &opt, &vm};
- const std::vector<const arg_t *> v_ld_stride = {&vd, &v_address, &xrs2, &opt, &vm};
- const std::vector<const arg_t *> v_st_stride = {&vs3, &v_address, &xrs2, &opt, &vm};
- const std::vector<const arg_t *> v_ld_index = {&vd, &v_address, &vs2, &opt, &vm};
- const std::vector<const arg_t *> v_st_index = {&vs3, &v_address, &vs2, &opt, &vm};
-
- DISASM_VMEM_LD_INSN( , , v_ld_unit);
- DISASM_VMEM_ST_INSN( , v_st_unit);
- DISASM_VMEM_LD_INSN(s, , v_ld_stride);
- DISASM_VMEM_ST_INSN(s, v_st_stride);
- DISASM_VMEM_LD_INSN(x, , v_ld_index);
- DISASM_VMEM_ST_INSN(x, v_st_index);
- DISASM_VMEM_LD_INSN( , ff, v_ld_unit);
-
- #undef DISASM_VMEM_LD_INSN
- #undef DISASM_VMEM_ST_INSN
+ #define DISASM_VMEM_INSN(name, fmt, ff) \
+ add_insn(new disasm_insn_t(#name "8" #ff ".v", match_##name##8##ff##_v, mask_##name##8##ff##_v | mask_nf, fmt)); \
+ add_insn(new disasm_insn_t(#name "16" #ff ".v", match_##name##16##ff##_v, mask_##name##16##ff##_v | mask_nf, fmt)); \
+ add_insn(new disasm_insn_t(#name "32" #ff ".v", match_##name##32##ff##_v, mask_##name##32##ff##_v | mask_nf, fmt)); \
+ add_insn(new disasm_insn_t(#name "64" #ff ".v", match_##name##64##ff##_v, mask_##name##64##ff##_v | mask_nf, fmt)); \
+ add_insn(new disasm_insn_t(#name "128" #ff ".v", match_##name##128##ff##_v, mask_##name##128##ff##_v | mask_nf, fmt)); \
+ add_insn(new disasm_insn_t(#name "256" #ff ".v", match_##name##256##ff##_v, mask_##name##256##ff##_v | mask_nf, fmt)); \
+ add_insn(new disasm_insn_t(#name "512" #ff ".v", match_##name##512##ff##_v, mask_##name##512##ff##_v | mask_nf, fmt)); \
+ add_insn(new disasm_insn_t(#name "1024" #ff ".v", match_##name##1024##ff##_v, mask_##name##1024##ff##_v | mask_nf, fmt)); \
+
+ std::vector<const arg_t *> v_ld_unit = {&vd, &v_address, &opt, &vm};
+ std::vector<const arg_t *> v_st_unit = {&vs3, &v_address, &opt, &vm};
+ std::vector<const arg_t *> v_ld_stride = {&vd, &v_address, &xrs2, &opt, &vm};
+ std::vector<const arg_t *> v_st_stride = {&vs3, &v_address, &xrs2, &opt, &vm};
+ std::vector<const arg_t *> v_ld_index = {&vd, &v_address, &vs2, &opt, &vm};
+ std::vector<const arg_t *> v_st_index = {&vs3, &v_address, &vs2, &opt, &vm};
+
+ DISASM_VMEM_INSN(vle, v_ld_unit, );
+ DISASM_VMEM_INSN(vlse, v_ld_stride, );
+ DISASM_VMEM_INSN(vlxei, v_ld_index, );
+ DISASM_VMEM_INSN(vle, v_ld_unit, ff);
+ DISASM_VMEM_INSN(vse, v_st_unit, );
+ DISASM_VMEM_INSN(vsse, v_st_stride, );
+ DISASM_VMEM_INSN(vsxei, v_st_index, );
+ DISASM_VMEM_INSN(vsuxei, v_st_unit, );
+
+ #undef DISASM_VMEM_INSN
// handle vector segment load/store
- for (size_t nf = 1; nf <= 7; ++nf) {
- std::pair<reg_t, reg_t> insn_code[] = {
- {match_vle_v, mask_vle_v},
- {match_vse_v, mask_vse_v},
+ for (size_t elt = 0; elt <= 7; ++elt) {
+ const ldst_seg_t template_insn[] = {
+ {match_vle8_v, mask_vle8_v, "vlseg%de%d.v", v_ld_unit},
+ {match_vse8_v, mask_vse8_v, "vsseg%de%d.v", v_st_unit},
- {match_vlse_v, mask_vlse_v},
- {match_vsse_v, mask_vssw_v},
+ {match_vlse8_v, mask_vlse8_v, "vlsseg%de%d.v", v_ld_stride},
+ {match_vsse8_v, mask_vsse8_v, "vssseg%de%d.v", v_st_stride},
- {match_vlxe_v, mask_vlxe_v},
- {match_vsxe_v, mask_vsxw_v},
+ {match_vlxei8_v, mask_vlxei8_v, "vlxseg%dei%d.v", v_ld_index},
+ {match_vsxei8_v, mask_vsxei8_v, "vsxseg%dei%d.v", v_st_index},
- {match_vleff_v, mask_vleff_v},
+ {match_vle8ff_v, mask_vle8ff_v, "vlseg%de%dff.v", v_ld_unit}
};
- std::pair<const char *, std::vector<const arg_t*>> fmts[] = {
- {"vlseg%de.v", {&vd, &v_address, &opt, &vm}},
- {"vsseg%de.v", {&vs3, &v_address, &opt, &vm}},
-
- {"vlsseg%de.v", {&vd, &v_address, &xrs2, &opt, &vm}},
- {"vssseg%de.v", {&vs3, &v_address, &xrs2, &opt, &vm}},
-
- {"vlxseg%de.v", {&vd, &v_address, &vs2, &opt, &vm}},
- {"vsxseg%de.v", {&vs3, &v_address, &vs2, &opt, &vm}},
-
- {"vlseg%deff.v", {&vd, &v_address, &opt, &vm}},
- };
-
-
-
- for (size_t idx_insn = 0; idx_insn < sizeof(insn_code) / sizeof(insn_code[0]); ++idx_insn) {
- const reg_t match_nf = nf << 29;
- char buf[128];
- sprintf(buf, fmts[idx_insn].first, nf + 1);
- add_insn(new disasm_insn_t(buf,
- insn_code[idx_insn].first | match_nf,
- insn_code[idx_insn].second | mask_nf,
- fmts[idx_insn].second
- ));
+ reg_t elt_map[] = {0x00000000, 0x00005000, 0x00006000, 0x00007000,
+ 0x10000000, 0x10005000, 0x10006000, 0x10007000};
+
+ for (size_t nf = 1; nf <= 7; ++nf) {
+ for (auto item : template_insn) {
+ const reg_t match_nf = nf << 29;
+ char buf[128];
+ sprintf(buf, item.fmt, nf + 1, 8 << elt);
+ add_insn(new disasm_insn_t(
+ buf,
+ ((item.match | match_nf) & ~mask_vldst) | elt_map[elt],
+ item.mask | mask_nf,
+ item.arg
+ ));
+ }
}
}