author     Chih-Min Chao <chihmin.chao@sifive.com>  2020-05-21 20:01:32 -0700
committer  Chih-Min Chao <chihmin.chao@sifive.com>  2020-05-21 20:45:57 -0700
commit     857ebb501199938327e9d25db45ccc13d646d4f6 (patch)
tree       ebc5cf3ea58f60311bffc49dadb40118c6c0c017 /riscv
parent     7eed9371f79f5c8f4675078d11a0228af9b5f571 (diff)
rvv: remove vmlen
Signed-off-by: Chih-Min Chao <chihmin.chao@sifive.com>
Diffstat (limited to 'riscv')
-rw-r--r--  riscv/decode.h             | 16
-rw-r--r--  riscv/insns/vcompress_vm.h |  5
-rw-r--r--  riscv/insns/vfmerge_vfm.h  | 12
-rw-r--r--  riscv/insns/viota_m.h      |  5
-rw-r--r--  riscv/insns/vmadc_vim.h    |  2
-rw-r--r--  riscv/insns/vmadc_vvm.h    |  2
-rw-r--r--  riscv/insns/vmadc_vxm.h    |  2
-rw-r--r--  riscv/insns/vmerge_vim.h   |  4
-rw-r--r--  riscv/insns/vmerge_vvm.h   |  4
-rw-r--r--  riscv/insns/vmerge_vxm.h   |  4
-rw-r--r--  riscv/insns/vmsbc_vvm.h    |  2
-rw-r--r--  riscv/insns/vmsbc_vxm.h    |  2
-rw-r--r--  riscv/insns/vmsbf_m.h      |  7
-rw-r--r--  riscv/insns/vmsif_m.h      |  7
-rw-r--r--  riscv/insns/vmsof_m.h      |  7
-rw-r--r--  riscv/insns/vpopc_m.h      |  5
-rw-r--r--  riscv/processor.cc         |  1
-rw-r--r--  riscv/processor.h          |  2
18 files changed, 40 insertions, 49 deletions
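
Editorial note: the mechanical content of the change is that, with the mask layout fixed at one bit per element, P.VU.vmlen is always 1, so (mlen * i) / 64 and (mlen * i) % 64 reduce to i / 64 and i % 64, and the shifted-mask expression (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos) collapses to the single-bit mask UINT64_C(1) << mpos. A minimal standalone check of that equivalence, not part of the Spike sources, assuming mlen == 1 as the commit does:

#include <cassert>
#include <cstdint>

int main() {
  const int mlen = 1;  // one mask bit per element, as this commit assumes
  for (uint64_t i = 0; i < 256; ++i) {
    // old addressing, parameterized by mlen
    const uint64_t midx_old = (mlen * i) / 64;
    const uint64_t mpos_old = (mlen * i) % 64;
    const uint64_t mmask_old =
        (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos_old);
    // new addressing, with mlen folded away
    assert(midx_old == i / 64);
    assert(mpos_old == i % 64);
    assert(mmask_old == (UINT64_C(1) << (i % 64)));
  }
  return 0;
}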
diff --git a/riscv/decode.h b/riscv/decode.h
index 55cbfbe..5baef00 100644
--- a/riscv/decode.h
+++ b/riscv/decode.h
@@ -393,9 +393,8 @@ inline long double to_f(float128_t f){long double r; memcpy(&r, &f, sizeof(r));
// vector: masking skip helper
//
#define VI_MASK_VARS \
- const int mlen = P.VU.vmlen; \
- const int midx = (mlen * i) / 64; \
- const int mpos = (mlen * i) % 64;
+ const int midx = i / 64; \
+ const int mpos = i % 64;
#define VI_LOOP_ELEMENT_SKIP(BODY) \
VI_MASK_VARS \
@@ -607,7 +606,7 @@ static inline bool is_aligned(const unsigned val, const unsigned pos)
reg_t rs2_num = insn.rs2(); \
for (reg_t i=P.VU.vstart; i<vl; ++i){ \
VI_LOOP_ELEMENT_SKIP(); \
- uint64_t mmask = (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos); \
+ uint64_t mmask = UINT64_C(1) << mpos; \
uint64_t &vdi = P.VU.elt<uint64_t>(insn.rd(), midx, true); \
uint64_t res = 0;
@@ -621,10 +620,9 @@ static inline bool is_aligned(const unsigned val, const unsigned pos)
require_vector;\
reg_t vl = P.VU.vl; \
for (reg_t i = P.VU.vstart; i < vl; ++i) { \
- int mlen = P.VU.vmlen; \
- int midx = (mlen * i) / 64; \
- int mpos = (mlen * i) % 64; \
- uint64_t mmask = (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos); \
+ int midx = i / 64; \
+ int mpos = i % 64; \
+ uint64_t mmask = UINT64_C(1) << mpos; \
uint64_t vs2 = P.VU.elt<uint64_t>(insn.rs2(), midx); \
uint64_t vs1 = P.VU.elt<uint64_t>(insn.rs1(), midx); \
uint64_t &res = P.VU.elt<uint64_t>(insn.rd(), midx, true); \
@@ -1826,7 +1824,7 @@ for (reg_t i = 0; i < P.VU.vlmax && P.VU.vl != 0; ++i) { \
float32_t vs1 = P.VU.elt<float32_t>(rs1_num, i); \
float32_t rs1 = f32(READ_FREG(rs1_num)); \
VI_LOOP_ELEMENT_SKIP(); \
- uint64_t mmask = (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos); \
+ uint64_t mmask = UINT64_C(1) << mpos; \
uint64_t &vdi = P.VU.elt<uint64_t>(rd_num, midx, true); \
uint64_t res = 0;
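
In the insn headers below, the simplified midx/mpos pair reads element i's mask bit out of v0 (vector register 0), treated as an array of 64-bit words: midx selects the word, mpos the bit within it. A standalone illustration of that read; the v0_words array is a hypothetical stand-in for the storage that P.VU.elt<uint64_t>(0, midx) exposes in Spike:

#include <cstdint>

// v0_words: hypothetical stand-in for the v0 register storage.
static inline bool mask_bit(const uint64_t *v0_words, uint64_t i) {
  const uint64_t midx = i / 64;  // which 64-bit word of v0
  const uint64_t mpos = i % 64;  // which bit inside that word
  return (v0_words[midx] >> mpos) & 0x1;
}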
diff --git a/riscv/insns/vcompress_vm.h b/riscv/insns/vcompress_vm.h
index 2efb99d..a2b810a 100644
--- a/riscv/insns/vcompress_vm.h
+++ b/riscv/insns/vcompress_vm.h
@@ -8,9 +8,8 @@ require(!is_overlapped(insn.rd(), P.VU.vlmul, insn.rs1(), 1));
reg_t pos = 0;
VI_GENERAL_LOOP_BASE
- const int mlen = P.VU.vmlen;
- const int midx = (mlen * i) / 64;
- const int mpos = (mlen * i) % 64;
+ const int midx = i / 64;
+ const int mpos = i % 64;
bool do_mask = (P.VU.elt<uint64_t>(rs1_num, midx) >> mpos) & 0x1;
if (do_mask) {
diff --git a/riscv/insns/vfmerge_vfm.h b/riscv/insns/vfmerge_vfm.h
index bd00e32..c9b39fe 100644
--- a/riscv/insns/vfmerge_vfm.h
+++ b/riscv/insns/vfmerge_vfm.h
@@ -9,8 +9,8 @@ switch(P.VU.vsew) {
auto rs1 = f16(READ_FREG(rs1_num));
auto vs2 = P.VU.elt<float16_t>(rs2_num, i);
- int midx = (P.VU.vmlen * i) / 64;
- int mpos = (P.VU.vmlen * i) % 64;
+ int midx = i / 64;
+ int mpos = i % 64;
bool use_first = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
vd = use_first ? rs1 : vs2;
@@ -22,8 +22,8 @@ switch(P.VU.vsew) {
auto rs1 = f32(READ_FREG(rs1_num));
auto vs2 = P.VU.elt<float32_t>(rs2_num, i);
- int midx = (P.VU.vmlen * i) / 64;
- int mpos = (P.VU.vmlen * i) % 64;
+ int midx = i / 64;
+ int mpos = i % 64;
bool use_first = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
vd = use_first ? rs1 : vs2;
@@ -35,8 +35,8 @@ switch(P.VU.vsew) {
auto rs1 = f64(READ_FREG(rs1_num));
auto vs2 = P.VU.elt<float64_t>(rs2_num, i);
- int midx = (P.VU.vmlen * i) / 64;
- int mpos = (P.VU.vmlen * i) % 64;
+ int midx = i / 64;
+ int mpos = i % 64;
bool use_first = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
vd = use_first ? rs1 : vs2;
diff --git a/riscv/insns/viota_m.h b/riscv/insns/viota_m.h
index f0ef62a..642a7f9 100644
--- a/riscv/insns/viota_m.h
+++ b/riscv/insns/viota_m.h
@@ -14,9 +14,8 @@ require((rd_num & (P.VU.vlmul - 1)) == 0);
int cnt = 0;
for (reg_t i = 0; i < vl; ++i) {
- const int mlen = P.VU.vmlen;
- const int midx = (mlen * i) / 64;
- const int mpos = (mlen * i) % 64;
+ const int midx = i / 64;
+ const int mpos = i % 64;
bool vs2_lsb = ((P.VU.elt<uint64_t>(rs2_num, midx) >> mpos) & 0x1) == 1;
bool do_mask = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
diff --git a/riscv/insns/vmadc_vim.h b/riscv/insns/vmadc_vim.h
index 36722d7..afdca7e 100644
--- a/riscv/insns/vmadc_vim.h
+++ b/riscv/insns/vmadc_vim.h
@@ -2,7 +2,7 @@
VI_XI_LOOP_CARRY
({
auto v0 = P.VU.elt<uint64_t>(0, midx);
- const uint64_t mmask = (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos);
+ const uint64_t mmask = UINT64_C(1) << mpos; \
const uint128_t op_mask = (UINT64_MAX >> (64 - sew));
uint64_t carry = insn.v_vm() == 0 ? (v0 >> mpos) & 0x1 : 0;
diff --git a/riscv/insns/vmadc_vvm.h b/riscv/insns/vmadc_vvm.h
index eae71d2..a5d54c6 100644
--- a/riscv/insns/vmadc_vvm.h
+++ b/riscv/insns/vmadc_vvm.h
@@ -2,7 +2,7 @@
VI_VV_LOOP_CARRY
({
auto v0 = P.VU.elt<uint64_t>(0, midx);
- const uint64_t mmask = (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos);
+ const uint64_t mmask = UINT64_C(1) << mpos; \
const uint128_t op_mask = (UINT64_MAX >> (64 - sew));
uint64_t carry = insn.v_vm() == 0 ? (v0 >> mpos) & 0x1 : 0;
diff --git a/riscv/insns/vmadc_vxm.h b/riscv/insns/vmadc_vxm.h
index 3b84e6d..ca0342e 100644
--- a/riscv/insns/vmadc_vxm.h
+++ b/riscv/insns/vmadc_vxm.h
@@ -2,7 +2,7 @@
VI_XI_LOOP_CARRY
({
auto v0 = P.VU.elt<uint64_t>(0, midx);
- const uint64_t mmask = (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos);
+ const uint64_t mmask = UINT64_C(1) << mpos; \
const uint128_t op_mask = (UINT64_MAX >> (64 - sew));
uint64_t carry = insn.v_vm() == 0 ? (v0 >> mpos) & 0x1 : 0;
diff --git a/riscv/insns/vmerge_vim.h b/riscv/insns/vmerge_vim.h
index 6185da5..b20bcde 100644
--- a/riscv/insns/vmerge_vim.h
+++ b/riscv/insns/vmerge_vim.h
@@ -3,8 +3,8 @@ require_vector;
VI_CHECK_SSS(false);
VI_VVXI_MERGE_LOOP
({
- int midx = (P.VU.vmlen * i) / 64;
- int mpos = (P.VU.vmlen * i) % 64;
+ int midx = i / 64;
+ int mpos = i % 64;
bool use_first = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
vd = use_first ? simm5 : vs2;
diff --git a/riscv/insns/vmerge_vvm.h b/riscv/insns/vmerge_vvm.h
index f0a3fd5..d670554 100644
--- a/riscv/insns/vmerge_vvm.h
+++ b/riscv/insns/vmerge_vvm.h
@@ -3,8 +3,8 @@ require_vector;
VI_CHECK_SSS(true);
VI_VVXI_MERGE_LOOP
({
- int midx = (P.VU.vmlen * i) / 64;
- int mpos = (P.VU.vmlen * i) % 64;
+ int midx = i / 64;
+ int mpos = i % 64;
bool use_first = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
vd = use_first ? vs1 : vs2;
diff --git a/riscv/insns/vmerge_vxm.h b/riscv/insns/vmerge_vxm.h
index 505b32f..3fd68cb 100644
--- a/riscv/insns/vmerge_vxm.h
+++ b/riscv/insns/vmerge_vxm.h
@@ -3,8 +3,8 @@ require_vector;
VI_CHECK_SSS(false);
VI_VVXI_MERGE_LOOP
({
- int midx = (P.VU.vmlen * i) / 64;
- int mpos = (P.VU.vmlen * i) % 64;
+ int midx = i / 64;
+ int mpos = i % 64;
bool use_first = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
vd = use_first ? rs1 : vs2;
diff --git a/riscv/insns/vmsbc_vvm.h b/riscv/insns/vmsbc_vvm.h
index 2f41f01..b6f1521 100644
--- a/riscv/insns/vmsbc_vvm.h
+++ b/riscv/insns/vmsbc_vvm.h
@@ -2,7 +2,7 @@
VI_VV_LOOP_CARRY
({
auto v0 = P.VU.elt<uint64_t>(0, midx);
- const uint64_t mmask = (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos);
+ const uint64_t mmask = UINT64_C(1) << mpos; \
const uint128_t op_mask = (UINT64_MAX >> (64 - sew));
uint64_t carry = insn.v_vm() == 0 ? (v0 >> mpos) & 0x1 : 0;
diff --git a/riscv/insns/vmsbc_vxm.h b/riscv/insns/vmsbc_vxm.h
index 87ce085..29fa012 100644
--- a/riscv/insns/vmsbc_vxm.h
+++ b/riscv/insns/vmsbc_vxm.h
@@ -2,7 +2,7 @@
VI_XI_LOOP_CARRY
({
auto &v0 = P.VU.elt<uint64_t>(0, midx);
- const uint64_t mmask = (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos);
+ const uint64_t mmask = UINT64_C(1) << mpos; \
const uint128_t op_mask = (UINT64_MAX >> (64 - sew));
uint64_t carry = insn.v_vm() == 0 ? (v0 >> mpos) & 0x1 : 0;
diff --git a/riscv/insns/vmsbf_m.h b/riscv/insns/vmsbf_m.h
index c7ec7f0..2be6f41 100644
--- a/riscv/insns/vmsbf_m.h
+++ b/riscv/insns/vmsbf_m.h
@@ -11,10 +11,9 @@ reg_t rs2_num = insn.rs2();
bool has_one = false;
for (reg_t i = P.VU.vstart; i < vl; ++i) {
- const int mlen = P.VU.vmlen;
- const int midx = (mlen * i) / 64;
- const int mpos = (mlen * i) % 64;
- const uint64_t mmask = (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos);
+ const int midx = i / 64;
+ const int mpos = i % 64;
+ const uint64_t mmask = UINT64_C(1) << mpos; \
bool vs2_lsb = ((P.VU.elt<uint64_t>(rs2_num, midx) >> mpos) & 0x1) == 1;
bool do_mask = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
diff --git a/riscv/insns/vmsif_m.h b/riscv/insns/vmsif_m.h
index cda4313..73aadbc 100644
--- a/riscv/insns/vmsif_m.h
+++ b/riscv/insns/vmsif_m.h
@@ -11,10 +11,9 @@ reg_t rs2_num = insn.rs2();
bool has_one = false;
for (reg_t i = P.VU.vstart ; i < vl; ++i) {
- const int mlen = P.VU.vmlen;
- const int midx = (mlen * i) / 64;
- const int mpos = (mlen * i) % 64;
- const uint64_t mmask = (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos);
+ const int midx = i / 64;
+ const int mpos = i % 64;
+ const uint64_t mmask = UINT64_C(1) << mpos; \
bool vs2_lsb = ((P.VU.elt<uint64_t>(rs2_num, midx ) >> mpos) & 0x1) == 1;
bool do_mask = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
diff --git a/riscv/insns/vmsof_m.h b/riscv/insns/vmsof_m.h
index a84815b..218ac61 100644
--- a/riscv/insns/vmsof_m.h
+++ b/riscv/insns/vmsof_m.h
@@ -11,10 +11,9 @@ reg_t rs2_num = insn.rs2();
bool has_one = false;
for (reg_t i = P.VU.vstart ; i < vl; ++i) {
- const int mlen = P.VU.vmlen;
- const int midx = (mlen * i) / 64;
- const int mpos = (mlen * i) % 64;
- const uint64_t mmask = (UINT64_MAX << (64 - mlen)) >> (64 - mlen - mpos);
+ const int midx = i / 64;
+ const int mpos = i % 64;
+ const uint64_t mmask = UINT64_C(1) << mpos; \
bool vs2_lsb = ((P.VU.elt<uint64_t>(rs2_num, midx ) >> mpos) & 0x1) == 1;
bool do_mask = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1;
diff --git a/riscv/insns/vpopc_m.h b/riscv/insns/vpopc_m.h
index de50943..9eaca1e 100644
--- a/riscv/insns/vpopc_m.h
+++ b/riscv/insns/vpopc_m.h
@@ -8,9 +8,8 @@ reg_t rs2_num = insn.rs2();
require(P.VU.vstart == 0);
reg_t popcount = 0;
for (reg_t i=P.VU.vstart; i<vl; ++i) {
- const int mlen = P.VU.vmlen;
- const int midx = (mlen * i) / 32;
- const int mpos = (mlen * i) % 32;
+ const int midx = i / 32;
+ const int mpos = i % 32;
bool vs2_lsb = ((P.VU.elt<uint32_t>(rs2_num, midx ) >> mpos) & 0x1) == 1;
if (insn.v_vm() == 1) {
diff --git a/riscv/processor.cc b/riscv/processor.cc
index 0edf193..1dfd563 100644
--- a/riscv/processor.cc
+++ b/riscv/processor.cc
@@ -408,7 +408,6 @@ reg_t processor_t::vectorUnit_t::set_vl(int rd, int rs1, reg_t reqVL, reg_t newT
vlmax = VLEN/vsew * vlmul;
vflmul = vlmul;
}
- vmlen = 1;
vill = !(vlmul>=1 && vlmul <=8) || vsew > ELEN || vediv != 1 || (newType >> 8) != 0;
if (vill) {
diff --git a/riscv/processor.h b/riscv/processor.h
index e5520e6..be1ffea 100644
--- a/riscv/processor.h
+++ b/riscv/processor.h
@@ -449,7 +449,7 @@ public:
void *reg_file;
char reg_referenced[NVPR];
int setvl_count;
- reg_t vlmax, vmlen;
+ reg_t vlmax;
reg_t vstart, vxrm, vxsat, vl, vtype, vlenb;
reg_t vma, vta;
reg_t vediv, vsew, vlmul;