aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authoreopXD <yueh.ting.chen@gmail.com>2021-11-28 16:13:54 +0800
committereopXD <yueh.ting.chen@gmail.com>2021-11-28 16:14:01 +0800
commitd177f05b7bd01f25a16e8cc0712fa551bdaeb954 (patch)
treea51cd7fae50884c0c24c51386cda720bd26ce017
parentab7521de6a76be7fc57665998679a7f6ecfafcb9 (diff)
downloadspike-d177f05b7bd01f25a16e8cc0712fa551bdaeb954.zip
spike-d177f05b7bd01f25a16e8cc0712fa551bdaeb954.tar.gz
spike-d177f05b7bd01f25a16e8cc0712fa551bdaeb954.tar.bz2
Have nclip_{wv/wx/wi} use different macros
This allows them to share the PARAMS macros with the narrowing right-shift instructions. Rename VV_NSHIFT_PARAMS -> VV_NARROW_PARAMS so that nclip, nsra, and nsrl can share it. (The same goes for VX_NSHIFT_PARAMS and VI_NSHIFT_PARAMS.)
-rw-r--r--riscv/decode.h89
-rw-r--r--riscv/insns/vnclip_wi.h4
-rw-r--r--riscv/insns/vnclip_wv.h4
-rw-r--r--riscv/insns/vnclip_wx.h4
-rw-r--r--riscv/insns/vnclipu_wi.h4
-rw-r--r--riscv/insns/vnclipu_wv.h4
-rw-r--r--riscv/insns/vnclipu_wx.h4
7 files changed, 60 insertions, 53 deletions
diff --git a/riscv/decode.h b/riscv/decode.h
index e709bb4..3487ec6 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -769,19 +769,19 @@ static inline bool is_aligned(const unsigned val, const unsigned pos)
auto &vd = P.VU.elt<type_sew_t<x>::type>(rd_num, i, true); \
auto vs2 = P.VU.elt<type_sew_t<x>::type>(rs2_num, i - offset);
-#define VI_NSHIFT_PARAMS(sew1, sew2) \
+#define VI_NARROW_PARAMS(sew1, sew2) \
auto &vd = P.VU.elt<type_usew_t<sew1>::type>(rd_num, i, true); \
auto vs2_u = P.VU.elt<type_usew_t<sew2>::type>(rs2_num, i); \
auto vs2 = P.VU.elt<type_sew_t<sew2>::type>(rs2_num, i); \
auto zimm5 = (type_usew_t<sew1>::type)insn.v_zimm5();
-#define VX_NSHIFT_PARAMS(sew1, sew2) \
+#define VX_NARROW_PARAMS(sew1, sew2) \
auto &vd = P.VU.elt<type_usew_t<sew1>::type>(rd_num, i, true); \
auto vs2_u = P.VU.elt<type_usew_t<sew2>::type>(rs2_num, i); \
auto vs2 = P.VU.elt<type_sew_t<sew2>::type>(rs2_num, i); \
auto rs1 = (type_sew_t<sew1>::type)RS1;
-#define VV_NSHIFT_PARAMS(sew1, sew2) \
+#define VV_NARROW_PARAMS(sew1, sew2) \
auto &vd = P.VU.elt<type_usew_t<sew1>::type>(rd_num, i, true); \
auto vs2_u = P.VU.elt<type_usew_t<sew2>::type>(rs2_num, i); \
auto vs2 = P.VU.elt<type_sew_t<sew2>::type>(rs2_num, i); \
@@ -1114,39 +1114,46 @@ static inline bool is_aligned(const unsigned val, const unsigned pos)
// narrow operation loop
#define VI_VV_LOOP_NARROW(BODY) \
-VI_NARROW_CHECK_COMMON; \
-VI_LOOP_BASE \
-if (sew == e8){ \
- VI_NARROW_SHIFT(e8, e16) \
- BODY; \
-}else if(sew == e16){ \
- VI_NARROW_SHIFT(e16, e32) \
- BODY; \
-}else if(sew == e32){ \
- VI_NARROW_SHIFT(e32, e64) \
- BODY; \
-} \
-VI_LOOP_END
-
-#define VI_NARROW_SHIFT(sew1, sew2) \
- type_usew_t<sew1>::type &vd = P.VU.elt<type_usew_t<sew1>::type>(rd_num, i, true); \
- type_usew_t<sew2>::type vs2_u = P.VU.elt<type_usew_t<sew2>::type>(rs2_num, i); \
- type_usew_t<sew1>::type zimm5 = (type_usew_t<sew1>::type)insn.v_zimm5(); \
- type_sew_t<sew2>::type vs2 = P.VU.elt<type_sew_t<sew2>::type>(rs2_num, i); \
- type_sew_t<sew1>::type vs1 = P.VU.elt<type_sew_t<sew1>::type>(rs1_num, i); \
- type_sew_t<sew1>::type rs1 = (type_sew_t<sew1>::type)RS1;
-
-#define VI_VVXI_LOOP_NARROW(BODY, is_vs1) \
- VI_CHECK_SDS(is_vs1); \
+ VI_CHECK_SDS(true); \
VI_LOOP_BASE \
if (sew == e8){ \
- VI_NARROW_SHIFT(e8, e16) \
+ VV_NARROW_PARAMS(e8, e16) \
BODY; \
- } else if (sew == e16) { \
- VI_NARROW_SHIFT(e16, e32) \
+ }else if(sew == e16){ \
+ VV_NARROW_PARAMS(e16, e32) \
BODY; \
- } else if (sew == e32) { \
- VI_NARROW_SHIFT(e32, e64) \
+ }else if(sew == e32){ \
+ VV_NARROW_PARAMS(e32, e64) \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VX_LOOP_NARROW(BODY) \
+ VI_CHECK_SDS(false); \
+ VI_LOOP_BASE \
+ if (sew == e8){ \
+ VX_NARROW_PARAMS(e8, e16) \
+ BODY; \
+ }else if(sew == e16){ \
+ VX_NARROW_PARAMS(e16, e32) \
+ BODY; \
+ }else if(sew == e32){ \
+ VX_NARROW_PARAMS(e32, e64) \
+ BODY; \
+ } \
+ VI_LOOP_END
+
+#define VI_VI_LOOP_NARROW(BODY) \
+ VI_CHECK_SDS(false); \
+ VI_LOOP_BASE \
+ if (sew == e8){ \
+ VI_NARROW_PARAMS(e8, e16) \
+ BODY; \
+ }else if(sew == e16){ \
+ VI_NARROW_PARAMS(e16, e32) \
+ BODY; \
+ }else if(sew == e32){ \
+ VI_NARROW_PARAMS(e32, e64) \
BODY; \
} \
VI_LOOP_END
@@ -1155,13 +1162,13 @@ VI_LOOP_END
VI_CHECK_SDS(false); \
VI_LOOP_NSHIFT_BASE \
if (sew == e8){ \
- VI_NSHIFT_PARAMS(e8, e16) \
+ VI_NARROW_PARAMS(e8, e16) \
BODY; \
} else if (sew == e16) { \
- VI_NSHIFT_PARAMS(e16, e32) \
+ VI_NARROW_PARAMS(e16, e32) \
BODY; \
} else if (sew == e32) { \
- VI_NSHIFT_PARAMS(e32, e64) \
+ VI_NARROW_PARAMS(e32, e64) \
BODY; \
} \
VI_LOOP_END
@@ -1170,13 +1177,13 @@ VI_LOOP_END
VI_CHECK_SDS(false); \
VI_LOOP_NSHIFT_BASE \
if (sew == e8){ \
- VX_NSHIFT_PARAMS(e8, e16) \
+ VX_NARROW_PARAMS(e8, e16) \
BODY; \
} else if (sew == e16) { \
- VX_NSHIFT_PARAMS(e16, e32) \
+ VX_NARROW_PARAMS(e16, e32) \
BODY; \
} else if (sew == e32) { \
- VX_NSHIFT_PARAMS(e32, e64) \
+ VX_NARROW_PARAMS(e32, e64) \
BODY; \
} \
VI_LOOP_END
@@ -1185,13 +1192,13 @@ VI_LOOP_END
VI_CHECK_SDS(true); \
VI_LOOP_NSHIFT_BASE \
if (sew == e8){ \
- VV_NSHIFT_PARAMS(e8, e16) \
+ VV_NARROW_PARAMS(e8, e16) \
BODY; \
} else if (sew == e16) { \
- VV_NSHIFT_PARAMS(e16, e32) \
+ VV_NARROW_PARAMS(e16, e32) \
BODY; \
} else if (sew == e32) { \
- VV_NSHIFT_PARAMS(e32, e64) \
+ VV_NARROW_PARAMS(e32, e64) \
BODY; \
} \
VI_LOOP_END
diff --git a/riscv/insns/vnclip_wi.h b/riscv/insns/vnclip_wi.h
index 6b68e1d..ea6898c 100644
--- a/riscv/insns/vnclip_wi.h
+++ b/riscv/insns/vnclip_wi.h
@@ -2,7 +2,7 @@
VRM xrm = P.VU.get_vround_mode();
int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
-VI_VVXI_LOOP_NARROW
+VI_VI_LOOP_NARROW
({
int128_t result = vs2;
unsigned shift = zimm5 & ((sew * 2) - 1);
@@ -22,4 +22,4 @@ VI_VVXI_LOOP_NARROW
}
vd = result;
-}, false)
+})
diff --git a/riscv/insns/vnclip_wv.h b/riscv/insns/vnclip_wv.h
index 5f87697..63b84c6 100644
--- a/riscv/insns/vnclip_wv.h
+++ b/riscv/insns/vnclip_wv.h
@@ -2,7 +2,7 @@
VRM xrm = P.VU.get_vround_mode();
int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
-VI_VVXI_LOOP_NARROW
+VI_VV_LOOP_NARROW
({
int128_t result = vs2;
unsigned shift = vs1 & ((sew * 2) - 1);
@@ -22,4 +22,4 @@ VI_VVXI_LOOP_NARROW
}
vd = result;
-}, true)
+})
diff --git a/riscv/insns/vnclip_wx.h b/riscv/insns/vnclip_wx.h
index 5436936..482eace 100644
--- a/riscv/insns/vnclip_wx.h
+++ b/riscv/insns/vnclip_wx.h
@@ -2,7 +2,7 @@
VRM xrm = P.VU.get_vround_mode();
int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
-VI_VVXI_LOOP_NARROW
+VI_VX_LOOP_NARROW
({
int128_t result = vs2;
unsigned shift = rs1 & ((sew * 2) - 1);
@@ -22,4 +22,4 @@ VI_VVXI_LOOP_NARROW
}
vd = result;
-}, false)
+})
diff --git a/riscv/insns/vnclipu_wi.h b/riscv/insns/vnclipu_wi.h
index ba39905..441a3a7 100644
--- a/riscv/insns/vnclipu_wi.h
+++ b/riscv/insns/vnclipu_wi.h
@@ -2,7 +2,7 @@
VRM xrm = P.VU.get_vround_mode();
uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew);
uint64_t sign_mask = UINT64_MAX << P.VU.vsew;
-VI_VVXI_LOOP_NARROW
+VI_VI_LOOP_NARROW
({
uint128_t result = vs2_u;
unsigned shift = zimm5 & ((sew * 2) - 1);
@@ -20,4 +20,4 @@ VI_VVXI_LOOP_NARROW
}
vd = result;
-}, false)
+})
diff --git a/riscv/insns/vnclipu_wv.h b/riscv/insns/vnclipu_wv.h
index d6647c6..8072489 100644
--- a/riscv/insns/vnclipu_wv.h
+++ b/riscv/insns/vnclipu_wv.h
@@ -2,7 +2,7 @@
VRM xrm = P.VU.get_vround_mode();
uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew);
uint64_t sign_mask = UINT64_MAX << P.VU.vsew;
-VI_VVXI_LOOP_NARROW
+VI_VV_LOOP_NARROW
({
uint128_t result = vs2_u;
unsigned shift = vs1 & ((sew * 2) - 1);
@@ -19,4 +19,4 @@ VI_VVXI_LOOP_NARROW
}
vd = result;
-}, true)
+})
diff --git a/riscv/insns/vnclipu_wx.h b/riscv/insns/vnclipu_wx.h
index bf44233..b2d91c3 100644
--- a/riscv/insns/vnclipu_wx.h
+++ b/riscv/insns/vnclipu_wx.h
@@ -2,7 +2,7 @@
VRM xrm = P.VU.get_vround_mode();
uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew);
uint64_t sign_mask = UINT64_MAX << P.VU.vsew;
-VI_VVXI_LOOP_NARROW
+VI_VX_LOOP_NARROW
({
uint128_t result = vs2_u;
unsigned shift = rs1 & ((sew * 2) - 1);
@@ -19,4 +19,4 @@ VI_VVXI_LOOP_NARROW
}
vd = result;
-}, false)
+})