author     Chih-Min Chao <chihmin.chao@sifive.com>   2019-10-22 22:59:27 -0700
committer  Chih-Min Chao <chihmin.chao@sifive.com>   2019-11-11 19:02:34 -0800
commit     c655b1cf465f59912ddb5b0dbd6f83f1d5516406 (patch)
tree       643e7308487622f336cf2fba868ae9be1ca32d9a
parent     f5a68933e509620326d6ff90b449dd074ae915ea (diff)
rvv: add reg checking rule for ldst
Includes:
  1. unit-stride
  2. strided
  3. indexed
  4. fault-first

Signed-off-by: Chih-Min Chao <chihmin.chao@sifive.com>
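Note: the rule enforced by the new VI_CHECK_SXX macro (see riscv/decode.h below) is the RVV register-group alignment constraint: when vlmul > 1 the destination register number must be a multiple of vlmul, and a masked operation (vm = 0) must not use v0 as its destination. The indexed variants additionally require the index register group (rs2) to be aligned the same way. A minimal standalone sketch of the check, using illustrative names vd, vs2, vlmul and vm in place of the decoder's insn accessors:

    /* Illustrative sketch only; the simulator expresses this with require()
     * inside VI_CHECK_SXX and per-instruction requires in riscv/insns/. */
    static int ldst_reg_check(unsigned vd, unsigned vs2, unsigned vlmul, unsigned vm)
    {
        /* vlmul is the register group multiplier: 1, 2, 4 or 8 */
        if (vlmul > 1) {
            /* destination group must start at a multiple of vlmul */
            if ((vd & (vlmul - 1)) != 0)
                return 0;
            /* masked op: destination must not overlap v0 (the mask register) */
            if (vm == 0 && vd == 0)
                return 0;
        }
        /* indexed loads/stores: index register group must be aligned too
         * (with vlmul = 1 this mask is 0, so every register passes) */
        if ((vs2 & (vlmul - 1)) != 0)
            return 0;
        return 1;
    }

For example, with vlmul = 4 a destination of v4 or v8 passes, while v6 fails the alignment test (6 & 3 == 2); with vlmul = 1 the mask reduces to zero and any register number is accepted.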
-rw-r--r--  riscv/decode.h         15
-rw-r--r--  riscv/insns/vleff_v.h   2
-rw-r--r--  riscv/insns/vlxb_v.h    1
-rw-r--r--  riscv/insns/vlxbu_v.h   1
-rw-r--r--  riscv/insns/vlxe_v.h    1
-rw-r--r--  riscv/insns/vlxh_v.h    1
-rw-r--r--  riscv/insns/vlxhu_v.h   1
-rw-r--r--  riscv/insns/vlxw_v.h    1
-rw-r--r--  riscv/insns/vlxwu_v.h   1
-rw-r--r--  riscv/insns/vsuxb_v.h   3
-rw-r--r--  riscv/insns/vsuxe_v.h   3
-rw-r--r--  riscv/insns/vsuxh_v.h   3
-rw-r--r--  riscv/insns/vsuxw_v.h   3
-rw-r--r--  riscv/insns/vsxb_v.h    1
-rw-r--r--  riscv/insns/vsxe_v.h    1
-rw-r--r--  riscv/insns/vsxh_v.h    1
-rw-r--r--  riscv/insns/vsxw_v.h    1
17 files changed, 32 insertions, 8 deletions
diff --git a/riscv/decode.h b/riscv/decode.h
index 2fb34ad..dd98f8b 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -407,6 +407,14 @@ static inline bool is_overlapped(const int astart, const int asize,
require(insn.rd() != 0); \
}
+#define VI_CHECK_SXX \
+ require_vector; \
+ if (P.VU.vlmul > 1) { \
+ require((insn.rd() & (P.VU.vlmul - 1)) == 0); \
+ if (insn.v_vm() == 0) \
+ require(insn.rd() != 0); \
+ }
+
#define VI_CHECK_SD \
require(!is_overlapped(insn.rd(), P.VU.vlmul, insn.rs2(), P.VU.vlmul * 2));
@@ -733,6 +741,7 @@ static inline bool is_overlapped(const int astart, const int asize,
// merge and copy loop
#define VI_VVXI_MERGE_LOOP(BODY) \
+ VI_CHECK_SXX; \
VI_GENERAL_LOOP_BASE \
if (sew == e8){ \
VXI_PARAMS(e8); \
@@ -1384,8 +1393,8 @@ for (reg_t i = 0; i < vlmax; ++i) { \
#define VI_ST(stride, offset, st_width, elt_byte) \
const reg_t nf = insn.v_nf() + 1; \
- require_vector; \
require((nf * P.VU.vlmul) <= (NVPR / 4)); \
+ VI_CHECK_SXX; \
const reg_t vl = P.VU.vl; \
const reg_t baseAddr = RS1; \
const reg_t vs3 = insn.rd(); \
@@ -1417,8 +1426,8 @@ for (reg_t i = 0; i < vlmax; ++i) { \
#define VI_LD(stride, offset, ld_width, elt_byte) \
const reg_t nf = insn.v_nf() + 1; \
- require_vector; \
require((nf * P.VU.vlmul) <= (NVPR / 4)); \
+ VI_CHECK_SXX; \
const reg_t vl = P.VU.vl; \
const reg_t baseAddr = RS1; \
const reg_t vd = insn.rd(); \
@@ -1452,10 +1461,10 @@ for (reg_t i = 0; i < vlmax; ++i) { \
#define VI_LDST_FF(itype, tsew) \
- require_vector; \
require(p->VU.vsew >= e##tsew && p->VU.vsew <= e64); \
const reg_t nf = insn.v_nf() + 1; \
require((nf * P.VU.vlmul) <= (NVPR / 4)); \
+ VI_CHECK_SXX; \
const reg_t sew = p->VU.vsew; \
const reg_t vl = p->VU.vl; \
const reg_t baseAddr = RS1; \
diff --git a/riscv/insns/vleff_v.h b/riscv/insns/vleff_v.h
index 7b870ca..e858de9 100644
--- a/riscv/insns/vleff_v.h
+++ b/riscv/insns/vleff_v.h
@@ -1,7 +1,7 @@
-require_vector;
require(P.VU.vsew >= e8 && P.VU.vsew <= e64);
const reg_t nf = insn.v_nf() + 1;
require((nf * P.VU.vlmul) <= (NVPR / 4));
+VI_CHECK_SXX;
const reg_t sew = P.VU.vsew;
const reg_t vl = P.VU.vl;
const reg_t baseAddr = RS1;
diff --git a/riscv/insns/vlxb_v.h b/riscv/insns/vlxb_v.h
index 5a99bd3..57ce8c8 100644
--- a/riscv/insns/vlxb_v.h
+++ b/riscv/insns/vlxb_v.h
@@ -1,4 +1,5 @@
// vlxb.v and vlsseg[2-8]b.v
require(P.VU.vsew >= e8);
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0);
VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
VI_LD(index[i], fn, int8, 1);
diff --git a/riscv/insns/vlxbu_v.h b/riscv/insns/vlxbu_v.h
index daf2d2b..d8e3dd6 100644
--- a/riscv/insns/vlxbu_v.h
+++ b/riscv/insns/vlxbu_v.h
@@ -1,4 +1,5 @@
// vlxbu.v and vlxseg[2-8]bu.v
require(P.VU.vsew >= e8);
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0);
VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
VI_LD(index[i], fn, uint8, 1);
diff --git a/riscv/insns/vlxe_v.h b/riscv/insns/vlxe_v.h
index b1190a8..1055eca 100644
--- a/riscv/insns/vlxe_v.h
+++ b/riscv/insns/vlxe_v.h
@@ -1,5 +1,6 @@
// vlxe.v and vlxseg[2-8]e.v
reg_t sew = P.VU.vsew;
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0);
VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
if (sew == e8) {
VI_LD(index[i], fn, int8, 1);
diff --git a/riscv/insns/vlxh_v.h b/riscv/insns/vlxh_v.h
index 98145db..9f4c3a1 100644
--- a/riscv/insns/vlxh_v.h
+++ b/riscv/insns/vlxh_v.h
@@ -1,4 +1,5 @@
// vlxh.v and vlxseg[2-8]h.v
require(P.VU.vsew >= e16);
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0);
VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
VI_LD(index[i], fn, int16, 2);
diff --git a/riscv/insns/vlxhu_v.h b/riscv/insns/vlxhu_v.h
index 27d549c..9283127 100644
--- a/riscv/insns/vlxhu_v.h
+++ b/riscv/insns/vlxhu_v.h
@@ -1,4 +1,5 @@
// vlxh.v and vlxseg[2-8]h.v
require(P.VU.vsew >= e16);
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0);
VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
VI_LD(index[i], fn, uint16, 2);
diff --git a/riscv/insns/vlxw_v.h b/riscv/insns/vlxw_v.h
index 83300f0..c1117a2 100644
--- a/riscv/insns/vlxw_v.h
+++ b/riscv/insns/vlxw_v.h
@@ -1,5 +1,6 @@
// vlxw.v and vlxseg[2-8]w.v
require(P.VU.vsew >= e32);
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0);
VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
VI_LD(index[i], fn, int32, 4);
diff --git a/riscv/insns/vlxwu_v.h b/riscv/insns/vlxwu_v.h
index a2f9913..d3034bd 100644
--- a/riscv/insns/vlxwu_v.h
+++ b/riscv/insns/vlxwu_v.h
@@ -1,4 +1,5 @@
// vlxwu.v and vlxseg[2-8]wu.v
require(P.VU.vsew >= e32);
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0);
VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
VI_LD(index[i], fn, uint32, 4);
diff --git a/riscv/insns/vsuxb_v.h b/riscv/insns/vsuxb_v.h
index 0dfe024..03f1980 100644
--- a/riscv/insns/vsuxb_v.h
+++ b/riscv/insns/vsuxb_v.h
@@ -1,6 +1,7 @@
// vsuxb.v and vsxseg[2-8]b.v
-require_vector;
require(P.VU.vsew >= e8);
+VI_CHECK_SXX;
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0); \
reg_t vl = P.VU.vl;
reg_t baseAddr = RS1;
reg_t stride = insn.rs2();
diff --git a/riscv/insns/vsuxe_v.h b/riscv/insns/vsuxe_v.h
index 5e4d3a2..22d6fb5 100644
--- a/riscv/insns/vsuxe_v.h
+++ b/riscv/insns/vsuxe_v.h
@@ -1,8 +1,9 @@
// vsxe.v and vsxseg[2-8]e.v
-require_vector;
const reg_t sew = P.VU.vsew;
const reg_t vl = P.VU.vl;
require(sew >= e8 && sew <= e64);
+VI_CHECK_SXX;
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0); \
reg_t baseAddr = RS1;
reg_t stride = insn.rs2();
reg_t vs3 = insn.rd();
diff --git a/riscv/insns/vsuxh_v.h b/riscv/insns/vsuxh_v.h
index c6f8be4..a34bc27 100644
--- a/riscv/insns/vsuxh_v.h
+++ b/riscv/insns/vsuxh_v.h
@@ -1,6 +1,7 @@
// vsxh.v and vsxseg[2-8]h.v
-require_vector;
require(P.VU.vsew >= e16);
+VI_CHECK_SXX;
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0); \
reg_t vl = P.VU.vl;
reg_t baseAddr = RS1;
reg_t stride = insn.rs2();
diff --git a/riscv/insns/vsuxw_v.h b/riscv/insns/vsuxw_v.h
index f133e77..f42092d 100644
--- a/riscv/insns/vsuxw_v.h
+++ b/riscv/insns/vsuxw_v.h
@@ -1,6 +1,7 @@
// vsxw.v and vsxseg[2-8]w.v
-require_vector;
require(P.VU.vsew >= e32);
+VI_CHECK_SXX;
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0); \
reg_t vl = P.VU.vl;
reg_t baseAddr = RS1;
reg_t stride = insn.rs2();
diff --git a/riscv/insns/vsxb_v.h b/riscv/insns/vsxb_v.h
index 3e50597..fb567fb 100644
--- a/riscv/insns/vsxb_v.h
+++ b/riscv/insns/vsxb_v.h
@@ -1,4 +1,5 @@
// vsxb.v and vsxseg[2-8]b.v
require(P.VU.vsew >= e8);
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0);
VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
VI_ST(index[i], fn, uint8, 1);
diff --git a/riscv/insns/vsxe_v.h b/riscv/insns/vsxe_v.h
index 28984ac..78c6605 100644
--- a/riscv/insns/vsxe_v.h
+++ b/riscv/insns/vsxe_v.h
@@ -1,6 +1,7 @@
// vsxe.v and vsxseg[2-8]e.v
reg_t sew = P.VU.vsew;
require(sew >= e8 && sew <= e64);
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0);
VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
if (sew == e8) {
VI_ST(index[i], fn, uint8, 1);
diff --git a/riscv/insns/vsxh_v.h b/riscv/insns/vsxh_v.h
index 2e5506a..6b0fcfd 100644
--- a/riscv/insns/vsxh_v.h
+++ b/riscv/insns/vsxh_v.h
@@ -1,4 +1,5 @@
// vsxh.v and vsxseg[2-8]h.v
require(P.VU.vsew >= e16);
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0);
VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
VI_ST(index[i], fn, uint16, 2);
diff --git a/riscv/insns/vsxw_v.h b/riscv/insns/vsxw_v.h
index 9a2119f..2223d5b 100644
--- a/riscv/insns/vsxw_v.h
+++ b/riscv/insns/vsxw_v.h
@@ -1,4 +1,5 @@
// vsxw.v and vsxseg[2-8]w.v
require(P.VU.vsew >= e32);
+require((insn.rs2() & (P.VU.vlmul - 1)) == 0);
VI_DUPLICATE_VREG(insn.rs2(), P.VU.vlmax);
VI_ST(index[i], fn, uint32, 4);