aboutsummaryrefslogtreecommitdiff
path: root/riscv/decode.h
diff options
context:
space:
mode:
authorChih-Min Chao <chihmin.chao@sifive.com>2019-10-07 02:27:37 -0700
committerChih-Min Chao <chihmin.chao@sifive.com>2019-11-11 19:02:34 -0800
commit1cd989add98a8796bf57902853e911608aa737dd (patch)
tree0e5e6ddc16b7bf74ef83ce7e818a72089320e944 /riscv/decode.h
parent60e3ed49523c2a1af8b7a115593da42d15515732 (diff)
downloadriscv-isa-sim-1cd989add98a8796bf57902853e911608aa737dd.zip
riscv-isa-sim-1cd989add98a8796bf57902853e911608aa737dd.tar.gz
riscv-isa-sim-1cd989add98a8796bf57902853e911608aa737dd.tar.bz2
rvv: add register usage check for wide and narrow insn
Includes: 1. narrow shift, 2. narrow clip, 3. wide mac. Signed-off-by: Chih-Min Chao <chihmin.chao@sifive.com>
Diffstat (limited to 'riscv/decode.h')
-rw-r--r--riscv/decode.h81
1 files changed, 48 insertions, 33 deletions
diff --git a/riscv/decode.h b/riscv/decode.h
index 72e2dc8..490c2c7 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -356,7 +356,7 @@ inline long double to_f(float128_t f){long double r; memcpy(&r, &f, sizeof(r));
//
// vector: operation and register acccess check helper
//
-static inline bool is_overlaped(const int astart, const int asize,
+static inline bool is_overlapped(const int astart, const int asize,
const int bstart, const int bsize)
{
const int aend = astart + asize;
@@ -365,38 +365,54 @@ static inline bool is_overlaped(const int astart, const int asize,
}
#define VI_NARROW_CHECK_COMMON \
+ require_vector;\
require(P.VU.vlmul <= 4); \
require(P.VU.vsew * 2 <= P.VU.ELEN); \
- require(insn.rs2() + P.VU.vlmul * 2 <= 32);
+ require((insn.rs2() & (P.VU.vlmul * 2 - 1)) == 0); \
+ require((insn.rd() & (P.VU.vlmul - 1)) == 0); \
+ if (insn.v_vm() == 0 && P.VU.vlmul > 1) \
+ require(insn.rd() != 0);
#define VI_WIDE_CHECK_COMMON \
require_vector;\
require(P.VU.vlmul <= 4); \
require(P.VU.vsew * 2 <= P.VU.ELEN); \
- require(insn.rd() + P.VU.vlmul * 2 <= 32); \
+ require((insn.rd() & (P.VU.vlmul * 2 - 1)) == 0); \
if (insn.v_vm() == 0) \
require(insn.rd() != 0);
#define VI_CHECK_VREG_OVERLAP(v1, v2) \
- require(!is_overlaped(v1, P.VU.vlmul, v2, P.VU.vlmul));
+ require(!is_overlapped(v1, P.VU.vlmul, v2, P.VU.vlmul));
#define VI_CHECK_SS \
- require(!is_overlaped(insn.rd(), P.VU.vlmul, insn.rs2(), P.VU.vlmul));
+ require(!is_overlapped(insn.rd(), P.VU.vlmul, insn.rs2(), P.VU.vlmul));
#define VI_CHECK_SD \
- require(!is_overlaped(insn.rd(), P.VU.vlmul, insn.rs2(), P.VU.vlmul * 2));
+ require(!is_overlapped(insn.rd(), P.VU.vlmul, insn.rs2(), P.VU.vlmul * 2));
-#define VI_CHECK_DSS(is_rs) \
+#define VI_CHECK_DSS(is_vs1) \
VI_WIDE_CHECK_COMMON; \
- require(!is_overlaped(insn.rd(), P.VU.vlmul * 2, insn.rs2(), P.VU.vlmul)); \
- if (is_rs) \
- require(!is_overlaped(insn.rd(), P.VU.vlmul * 2, insn.rs1(), P.VU.vlmul));
+ require(!is_overlapped(insn.rd(), P.VU.vlmul * 2, insn.rs2(), P.VU.vlmul)); \
+ require((insn.rs2() & (P.VU.vlmul - 1)) == 0); \
+ if (is_vs1) {\
+ require(!is_overlapped(insn.rd(), P.VU.vlmul * 2, insn.rs1(), P.VU.vlmul)); \
+ require((insn.rs1() & (P.VU.vlmul - 1)) == 0); \
+ }
#define VI_CHECK_DDS(is_rs) \
VI_WIDE_CHECK_COMMON; \
- require(insn.rs2() + P.VU.vlmul * 2 <= 32); \
- if (is_rs) \
- require(!is_overlaped(insn.rd(), P.VU.vlmul * 2, insn.rs1(), P.VU.vlmul));
+ require((insn.rs2() & (P.VU.vlmul * 2 - 1)) == 0); \
+ if (is_rs) { \
+ require(!is_overlapped(insn.rd(), P.VU.vlmul * 2, insn.rs1(), P.VU.vlmul)); \
+ require((insn.rs1() & (P.VU.vlmul - 1)) == 0); \
+ }
+
+#define VI_CHECK_SDS(is_vs1) \
+ VI_NARROW_CHECK_COMMON; \
+ require(!is_overlapped(insn.rd(), P.VU.vlmul, insn.rs2(), P.VU.vlmul * 2)); \
+ if (is_vs1) \
+ require((insn.rs1() & (P.VU.vlmul - 1)) == 0); \
+
//
// vector: loop header and end helper
@@ -465,10 +481,6 @@ static inline bool is_overlaped(const int astart, const int asize,
P.VU.vstart = 0;
#define VI_LOOP_NSHIFT_BASE \
- require(P.VU.vsew <= e32); \
- if (insn.rd() != 0){ \
- VI_CHECK_SD; \
- } \
VI_GENERAL_LOOP_BASE; \
VI_LOOP_ELEMENT_SKIP({\
require(!(insn.rd() == 0 && P.VU.vlmul > 1));\
@@ -892,8 +904,8 @@ VI_LOOP_END
type_sew_t<sew1>::type vs1 = P.VU.elt<type_sew_t<sew1>::type>(rs1_num, i); \
type_sew_t<sew1>::type rs1 = (type_sew_t<sew1>::type)RS1;
-#define VI_VVXI_LOOP_NARROW(BODY) \
- require(P.VU.vsew <= e32); \
+#define VI_VVXI_LOOP_NARROW(BODY, is_vs1) \
+ VI_CHECK_SDS(is_vs1); \
VI_LOOP_BASE \
if (sew == e8){ \
VI_NARROW_SHIFT(e8, e16) \
@@ -907,7 +919,8 @@ VI_LOOP_END
} \
VI_LOOP_END
-#define VI_VI_LOOP_NSHIFT(BODY) \
+#define VI_VI_LOOP_NSHIFT(BODY, is_vs1) \
+ VI_CHECK_SDS(is_vs1); \
VI_LOOP_NSHIFT_BASE \
if (sew == e8){ \
VI_NSHIFT_PARAMS(e8, e16) \
@@ -921,7 +934,8 @@ VI_LOOP_END
} \
VI_LOOP_END
-#define VI_VX_LOOP_NSHIFT(BODY) \
+#define VI_VX_LOOP_NSHIFT(BODY, is_vs1) \
+ VI_CHECK_SDS(is_vs1); \
VI_LOOP_NSHIFT_BASE \
if (sew == e8){ \
VX_NSHIFT_PARAMS(e8, e16) \
@@ -935,7 +949,8 @@ VI_LOOP_END
} \
VI_LOOP_END
-#define VI_VV_LOOP_NSHIFT(BODY) \
+#define VI_VV_LOOP_NSHIFT(BODY, is_vs1) \
+ VI_CHECK_SDS(is_vs1); \
VI_LOOP_NSHIFT_BASE \
if (sew == e8){ \
VV_NSHIFT_PARAMS(e8, e16) \
@@ -1065,8 +1080,8 @@ VI_LOOP_END
vd = sat_add<int##sew2##_t, uint##sew2##_t>(vd, res, sat); \
P.VU.vxsat |= sat;
-#define VI_VVX_LOOP_WIDE_SSMA(opd) \
- VI_WIDE_CHECK_COMMON \
+#define VI_VVX_LOOP_WIDE_SSMA(opd, is_vs1) \
+ VI_CHECK_DSS(is_vs1) \
VI_LOOP_BASE \
if (sew == e8){ \
VI_WIDE_SSMA(8, 16, opd); \
@@ -1093,8 +1108,8 @@ VI_LOOP_END
vd = sat_addu<uint##sew2##_t>(vd, res, sat); \
P.VU.vxsat |= sat;
-#define VI_VVX_LOOP_WIDE_USSMA(opd) \
- VI_WIDE_CHECK_COMMON \
+#define VI_VVX_LOOP_WIDE_USSMA(opd, is_vs1) \
+ VI_CHECK_DSS(is_vs1) \
VI_LOOP_BASE \
if (sew == e8){ \
VI_WIDE_USSMA(8, 16, opd); \
@@ -1121,8 +1136,8 @@ VI_LOOP_END
vd = sat_sub<int##sew2##_t, uint##sew2##_t>(vd, res, sat); \
P.VU.vxsat |= sat;
-#define VI_VVX_LOOP_WIDE_SU_SSMA(opd) \
- VI_WIDE_CHECK_COMMON \
+#define VI_VVX_LOOP_WIDE_SU_SSMA(opd, is_vs1) \
+ VI_CHECK_DSS(is_vs1) \
VI_LOOP_BASE \
if (sew == e8){ \
VI_WIDE_SU_SSMA(8, 16, opd); \
@@ -1150,7 +1165,7 @@ VI_LOOP_END
P.VU.vxsat |= sat;
#define VI_VVX_LOOP_WIDE_US_SSMA(opd) \
- VI_WIDE_CHECK_COMMON \
+ VI_CHECK_DSS(false) \
VI_LOOP_BASE \
if (sew == e8){ \
VI_WIDE_US_SSMA(8, 16, opd); \
@@ -1583,8 +1598,8 @@ for (reg_t i = 0; i < vlmax; ++i) { \
VI_VFP_LOOP_CMP_END \
#define VI_VFP_VF_LOOP_WIDE(BODY) \
- VI_VFP_LOOP_BASE \
VI_CHECK_DSS(false); \
+ VI_VFP_LOOP_BASE \
switch(P.VU.vsew) { \
case e32: {\
float64_t &vd = P.VU.elt<float64_t>(rd_num, i); \
@@ -1605,8 +1620,8 @@ for (reg_t i = 0; i < vlmax; ++i) { \
#define VI_VFP_VV_LOOP_WIDE(BODY) \
- VI_VFP_LOOP_BASE \
VI_CHECK_DSS(true); \
+ VI_VFP_LOOP_BASE \
switch(P.VU.vsew) { \
case e32: {\
float64_t &vd = P.VU.elt<float64_t>(rd_num, i); \
@@ -1626,8 +1641,8 @@ for (reg_t i = 0; i < vlmax; ++i) { \
VI_VFP_LOOP_WIDE_END
#define VI_VFP_WF_LOOP_WIDE(BODY) \
- VI_VFP_LOOP_BASE \
VI_CHECK_DDS(false); \
+ VI_VFP_LOOP_BASE \
switch(P.VU.vsew) { \
case e32: {\
float64_t &vd = P.VU.elt<float64_t>(rd_num, i); \
@@ -1646,8 +1661,8 @@ for (reg_t i = 0; i < vlmax; ++i) { \
VI_VFP_LOOP_WIDE_END
#define VI_VFP_WV_LOOP_WIDE(BODY) \
- VI_VFP_LOOP_BASE \
VI_CHECK_DDS(true); \
+ VI_VFP_LOOP_BASE \
switch(P.VU.vsew) { \
case e32: {\
float64_t &vd = P.VU.elt<float64_t>(rd_num, i); \