author    Andrew Waterman <andrew@sifive.com>  2021-07-26 07:13:12 -0700
committer GitHub <noreply@github.com>          2021-07-26 07:13:12 -0700
commit  5aec3dd51998ed5e99100094facd59d246ed0eca (patch)
tree    067016ab78400be586d206769b81cd9db286263a
parent  39070797c1239dd18d286b500c8c76cc02783c47 (diff)
parent  ffcbc2767f2f243320ca9c2f74f0e682330962cc (diff)
Merge pull request #754 from chihminchao/clean-vqmac-vdot
decode: op: remove quad-related macros and defines
-rw-r--r--  riscv/decode.h   | 79 ----------------
-rw-r--r--  riscv/encoding.h | 18 ----
2 files changed, 0 insertions(+), 97 deletions(-)
diff --git a/riscv/decode.h b/riscv/decode.h
index 9a7f113..16bf70c 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -578,28 +578,6 @@ static inline bool is_aligned(const unsigned val, const unsigned pos)
} \
}
-#define VI_CHECK_QSS(is_vs1) \
- require_vector(true);\
- p->supports_extension(EXT_ZVQMAC); \
- require(P.VU.vflmul <= 2); \
- require(P.VU.vsew * 4 <= P.VU.ELEN); \
- require_align(insn.rd(), P.VU.vflmul * 4); \
- require_align(insn.rs2(), P.VU.vflmul); \
- require_vm; \
- if (P.VU.vflmul < 1) {\
- require_noover(insn.rd(), P.VU.vflmul * 4, insn.rs2(), P.VU.vflmul); \
- } else {\
- require_noover_widen(insn.rd(), P.VU.vflmul * 4, insn.rs2(), P.VU.vflmul); \
- } \
- if (is_vs1) {\
- require_align(insn.rs1(), P.VU.vflmul); \
- if (P.VU.vflmul < 1) {\
- require_noover(insn.rd(), P.VU.vflmul * 4, insn.rs1(), P.VU.vflmul); \
- } else {\
- require_noover_widen(insn.rd(), P.VU.vflmul * 4, insn.rs1(), P.VU.vflmul); \
- } \
- }
-
#define VI_CHECK_DDS(is_rs) \
VI_WIDE_CHECK_COMMON; \
require_align(insn.rs2(), P.VU.vflmul * 2); \
@@ -1324,63 +1302,6 @@ VI_LOOP_END
break; \
}
-// quad operation loop
-#define VI_VV_LOOP_QUAD(BODY) \
- VI_CHECK_QSS(true); \
- VI_LOOP_BASE \
- if (sew == e8){ \
- VV_PARAMS(e8); \
- BODY; \
- }else if(sew == e16){ \
- VV_PARAMS(e16); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_VX_LOOP_QUAD(BODY) \
- VI_CHECK_QSS(false); \
- VI_LOOP_BASE \
- if (sew == e8){ \
- VX_PARAMS(e8); \
- BODY; \
- }else if(sew == e16){ \
- VX_PARAMS(e16); \
- BODY; \
- } \
- VI_LOOP_END
-
-#define VI_QUAD_OP_AND_ASSIGN(var0, var1, var2, op0, op1, sign) \
- switch(P.VU.vsew) { \
- case e8: { \
- sign##32_t vd_w = P.VU.elt<sign##32_t>(rd_num, i); \
- P.VU.elt<uint32_t>(rd_num, i, true) = \
- op1((sign##32_t)(sign##8_t)var0 op0 (sign##32_t)(sign##8_t)var1) + var2; \
- } \
- break; \
- default: { \
- sign##64_t vd_w = P.VU.elt<sign##64_t>(rd_num, i); \
- P.VU.elt<uint64_t>(rd_num, i, true) = \
- op1((sign##64_t)(sign##16_t)var0 op0 (sign##64_t)(sign##16_t)var1) + var2; \
- } \
- break; \
- }
-
-#define VI_QUAD_OP_AND_ASSIGN_MIX(var0, var1, var2, op0, op1, sign_d, sign_1, sign_2) \
- switch(P.VU.vsew) { \
- case e8: { \
- sign_d##32_t vd_w = P.VU.elt<sign_d##32_t>(rd_num, i); \
- P.VU.elt<uint32_t>(rd_num, i, true) = \
- op1((sign_1##32_t)(sign_1##8_t)var0 op0 (sign_2##32_t)(sign_2##8_t)var1) + var2; \
- } \
- break; \
- default: { \
- sign_d##64_t vd_w = P.VU.elt<sign_d##64_t>(rd_num, i); \
- P.VU.elt<uint64_t>(rd_num, i, true) = \
- op1((sign_1##64_t)(sign_1##16_t)var0 op0 (sign_2##64_t)(sign_2##16_t)var1) + var2; \
- } \
- break; \
- }
-
// wide reduction loop - signed
#define VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \
reg_t vl = P.VU.vl; \
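For reference, the quad-widening loop macros deleted above (VI_VV_LOOP_QUAD / VI_VX_LOOP_QUAD together with VI_QUAD_OP_AND_ASSIGN) widened each SEW-bit source element to 4*SEW bits, combined the pair with op0, applied op1, and added var2. The per-instruction bodies that invoked them are not part of this diff, so the exact arguments are a reconstruction. The standalone C++ sketch below mirrors only the SEW=8 signed multiply-accumulate case, assuming op0 = '*', op1 = unary '+', and var2 = the old destination element (vd_w); it illustrates the removed arithmetic, not Spike code, and it omits the vector loop, masking, and the VI_CHECK_QSS register-group checks.

// Illustration of the arithmetic encoded by the removed VI_QUAD_OP_AND_ASSIGN
// for the e8 case: a 32-bit destination element accumulates the product of
// two sign-extended 8-bit source elements (vd[i] += vs2[i] * vs1[i]).
#include <cstdint>
#include <cstdio>

static int32_t quad_macc_e8(int32_t vd_w, int8_t vs2, int8_t vs1)
{
  // op1((sign##32_t)(sign##8_t)var0 op0 (sign##32_t)(sign##8_t)var1) + var2,
  // with op0 = '*', op1 = unary '+', var2 = vd_w (assumed caller arguments).
  return +((int32_t)vs2 * (int32_t)vs1) + vd_w;
}

int main()
{
  int32_t acc = 1000;
  acc = quad_macc_e8(acc, -128, 127);  // 1000 + (-128 * 127) = -15256
  std::printf("acc = %d\n", acc);
  return 0;
}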
diff --git a/riscv/encoding.h b/riscv/encoding.h
index bde53a4..39e9aa0 100644
--- a/riscv/encoding.h
+++ b/riscv/encoding.h
@@ -1631,8 +1631,6 @@
#define MASK_VFWSUB_WV 0xfc00707f
#define MATCH_VFWMUL_VV 0xe0001057
#define MASK_VFWMUL_VV 0xfc00707f
-#define MATCH_VFDOT_VV 0xe4001057
-#define MASK_VFDOT_VV 0xfc00707f
#define MATCH_VFWMACC_VV 0xf0001057
#define MASK_VFWMACC_VV 0xfc00707f
#define MATCH_VFWNMACC_VV 0xf4001057
@@ -1801,16 +1799,6 @@
#define MASK_VWREDSUMU_VS 0xfc00707f
#define MATCH_VWREDSUM_VS 0xc4000057
#define MASK_VWREDSUM_VS 0xfc00707f
-#define MATCH_VDOTU_VV 0xe0000057
-#define MASK_VDOTU_VV 0xfc00707f
-#define MATCH_VDOT_VV 0xe4000057
-#define MASK_VDOT_VV 0xfc00707f
-#define MATCH_VQMACCU_VV 0xf0000057
-#define MASK_VQMACCU_VV 0xfc00707f
-#define MATCH_VQMACC_VV 0xf4000057
-#define MASK_VQMACC_VV 0xfc00707f
-#define MATCH_VQMACCSU_VV 0xfc000057
-#define MASK_VQMACCSU_VV 0xfc00707f
#define MATCH_VADD_VI 0x3057
#define MASK_VADD_VI 0xfc00707f
#define MATCH_VRSUB_VI 0xc003057
@@ -3792,7 +3780,6 @@ DECLARE_INSN(vfwredosum_vs, MATCH_VFWREDOSUM_VS, MASK_VFWREDOSUM_VS)
DECLARE_INSN(vfwadd_wv, MATCH_VFWADD_WV, MASK_VFWADD_WV)
DECLARE_INSN(vfwsub_wv, MATCH_VFWSUB_WV, MASK_VFWSUB_WV)
DECLARE_INSN(vfwmul_vv, MATCH_VFWMUL_VV, MASK_VFWMUL_VV)
-DECLARE_INSN(vfdot_vv, MATCH_VFDOT_VV, MASK_VFDOT_VV)
DECLARE_INSN(vfwmacc_vv, MATCH_VFWMACC_VV, MASK_VFWMACC_VV)
DECLARE_INSN(vfwnmacc_vv, MATCH_VFWNMACC_VV, MASK_VFWNMACC_VV)
DECLARE_INSN(vfwmsac_vv, MATCH_VFWMSAC_VV, MASK_VFWMSAC_VV)
@@ -3877,11 +3864,6 @@ DECLARE_INSN(vnclipu_wv, MATCH_VNCLIPU_WV, MASK_VNCLIPU_WV)
DECLARE_INSN(vnclip_wv, MATCH_VNCLIP_WV, MASK_VNCLIP_WV)
DECLARE_INSN(vwredsumu_vs, MATCH_VWREDSUMU_VS, MASK_VWREDSUMU_VS)
DECLARE_INSN(vwredsum_vs, MATCH_VWREDSUM_VS, MASK_VWREDSUM_VS)
-DECLARE_INSN(vdotu_vv, MATCH_VDOTU_VV, MASK_VDOTU_VV)
-DECLARE_INSN(vdot_vv, MATCH_VDOT_VV, MASK_VDOT_VV)
-DECLARE_INSN(vqmaccu_vv, MATCH_VQMACCU_VV, MASK_VQMACCU_VV)
-DECLARE_INSN(vqmacc_vv, MATCH_VQMACC_VV, MASK_VQMACC_VV)
-DECLARE_INSN(vqmaccsu_vv, MATCH_VQMACCSU_VV, MASK_VQMACCSU_VV)
DECLARE_INSN(vadd_vi, MATCH_VADD_VI, MASK_VADD_VI)
DECLARE_INSN(vrsub_vi, MATCH_VRSUB_VI, MASK_VRSUB_VI)
DECLARE_INSN(vand_vi, MATCH_VAND_VI, MASK_VAND_VI)
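The MATCH_/MASK_ pairs and DECLARE_INSN entries removed from encoding.h follow the usual match/mask convention: an instruction word is recognized when (insn & MASK_x) == MATCH_x, with the masked-out bits left free for the operand fields. Below is a minimal standalone check using the MATCH_VQMACC_VV / MASK_VQMACC_VV values deleted in this patch; it only demonstrates the convention and is not Spike's decoder.

// Standalone match/mask check using constants removed by this patch.
#include <cstdint>
#include <cstdio>

constexpr uint32_t MATCH_VQMACC_VV = 0xf4000057;  // fixed opcode/funct bit pattern
constexpr uint32_t MASK_VQMACC_VV  = 0xfc00707f;  // bits that must equal the pattern

static bool is_vqmacc_vv(uint32_t insn)
{
  // The bits selected by the mask carry the opcode/funct fields; everything
  // else (vd, vs1, vs2, vm) is operand data and is ignored by the test.
  return (insn & MASK_VQMACC_VV) == MATCH_VQMACC_VV;
}

int main()
{
  // Fill the operand fields of a vqmacc.vv encoding: vd=4, vs1=2, vs2=8, vm=1.
  uint32_t insn = MATCH_VQMACC_VV | (4u << 7) | (2u << 15) | (8u << 20) | (1u << 25);
  std::printf("%s\n", is_vqmacc_vv(insn) ? "matches vqmacc.vv" : "no match");
  return 0;
}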