aboutsummaryrefslogtreecommitdiff
path: root/riscv/insns
diff options
context:
space:
mode:
authorChih-Min Chao <chihmin.chao@sifive.com>2019-09-25 19:57:50 -0700
committerChih-Min Chao <chihmin.chao@sifive.com>2019-11-11 19:02:34 -0800
commitb9d9e1ebd02c62ad354195481d1136f5be3f54cd (patch)
treec0d333eae33185b189cf730719f7d007c98bddaa /riscv/insns
parentdf85f7fbe4b82eccf594a0d0fb7b8f5e6150dea1 (diff)
downloadspike-b9d9e1ebd02c62ad354195481d1136f5be3f54cd.zip
spike-b9d9e1ebd02c62ad354195481d1136f5be3f54cd.tar.gz
spike-b9d9e1ebd02c62ad354195481d1136f5be3f54cd.tar.bz2
rvv: remove configurable tail-zero
The tail-zero feature has been removed after v0.8-draft. Signed-off-by: Chih-Min Chao <chihmin.chao@sifive.com>
Diffstat (limited to 'riscv/insns')
-rw-r--r--riscv/insns/vcompress_vm.h2
-rw-r--r--riscv/insns/vfmerge_vfm.h1
-rw-r--r--riscv/insns/vfmv_s_f.h7
-rw-r--r--riscv/insns/vfmv_v_f.h1
-rw-r--r--riscv/insns/vid_v.h1
-rw-r--r--riscv/insns/viota_m.h1
-rw-r--r--riscv/insns/vleff_v.h17
-rw-r--r--riscv/insns/vmsbf_m.h1
-rw-r--r--riscv/insns/vmsif_m.h1
-rw-r--r--riscv/insns/vmsof_m.h1
-rw-r--r--riscv/insns/vmv_s_x.h20
-rw-r--r--riscv/insns/vsuxb_v.h19
-rw-r--r--riscv/insns/vsuxe_v.h21
-rw-r--r--riscv/insns/vsuxh_v.h16
-rw-r--r--riscv/insns/vsuxw_v.h11
15 files changed, 34 insertions, 86 deletions
diff --git a/riscv/insns/vcompress_vm.h b/riscv/insns/vcompress_vm.h
index b056b0e..91d6e90 100644
--- a/riscv/insns/vcompress_vm.h
+++ b/riscv/insns/vcompress_vm.h
@@ -34,7 +34,7 @@ for (reg_t i = P.VU.vstart ; i < vl; ++i) {
}
}
-if (vl > 0 && TAIL_ZEROING) {
+if (vl > 0 && P.VU.TZ) {
uint8_t *tail = &P.VU.elt<uint8_t>(rd_num, pos * ((sew >> 3) * 1));
memset(tail, 0, (P.VU.vlmax - pos) * ((sew >> 3) * 1));
}
diff --git a/riscv/insns/vfmerge_vfm.h b/riscv/insns/vfmerge_vfm.h
index 6d12bce..e8601fe 100644
--- a/riscv/insns/vfmerge_vfm.h
+++ b/riscv/insns/vfmerge_vfm.h
@@ -20,6 +20,5 @@ for (reg_t i=P.VU.vstart; i<vl; ++i) {
vd = use_first ? rs1 : vs2;
}
-VI_TAIL_ZERO(1);
P.VU.vstart = 0;
set_fp_exceptions;
diff --git a/riscv/insns/vfmv_s_f.h b/riscv/insns/vfmv_s_f.h
index a08282f..8ff6094 100644
--- a/riscv/insns/vfmv_s_f.h
+++ b/riscv/insns/vfmv_s_f.h
@@ -15,12 +15,5 @@ if (vl > 0) {
else
P.VU.elt<uint32_t>(rd_num, 0) = f32(FRS1).v;
- if (TAIL_ZEROING) {
- const reg_t max_len = P.VU.VLEN / sew;
- for (reg_t i = 1; i < max_len; ++i) {
- P.VU.elt<uint32_t>(rd_num, i) = 0;
- }
- }
-
vl = 0;
}
diff --git a/riscv/insns/vfmv_v_f.h b/riscv/insns/vfmv_v_f.h
index c85a3e9..150298b 100644
--- a/riscv/insns/vfmv_v_f.h
+++ b/riscv/insns/vfmv_v_f.h
@@ -15,6 +15,5 @@ for (reg_t i=P.VU.vstart; i<vl; ++i) {
vd = rs1;
}
-VI_TAIL_ZERO(1);
P.VU.vstart = 0;
set_fp_exceptions;
diff --git a/riscv/insns/vid_v.h b/riscv/insns/vid_v.h
index df6dd04..2291495 100644
--- a/riscv/insns/vid_v.h
+++ b/riscv/insns/vid_v.h
@@ -26,5 +26,4 @@ for (reg_t i = P.VU.vstart ; i < P.VU.vl; ++i) {
}
}
-VI_TAIL_ZERO(1);
P.VU.vstart = 0;
diff --git a/riscv/insns/viota_m.h b/riscv/insns/viota_m.h
index fde0291..55d8df1 100644
--- a/riscv/insns/viota_m.h
+++ b/riscv/insns/viota_m.h
@@ -49,4 +49,3 @@ for (reg_t i = 0; i < vl; ++i) {
}
}
-VI_TAIL_ZERO(1);
diff --git a/riscv/insns/vleff_v.h b/riscv/insns/vleff_v.h
index ec2777a..7b870ca 100644
--- a/riscv/insns/vleff_v.h
+++ b/riscv/insns/vleff_v.h
@@ -9,7 +9,6 @@ const reg_t rd_num = insn.rd();
bool early_stop = false;
const reg_t vlmul = P.VU.vlmul;
for (reg_t i = 0; i < P.VU.vlmax && vl != 0; ++i) {
- bool is_valid = true;
bool is_zero = false;
VI_STRIP(i);
VI_ELEMENT_SKIP(i);
@@ -20,23 +19,23 @@ for (reg_t i = 0; i < P.VU.vlmax && vl != 0; ++i) {
switch (sew) {
case e8:
P.VU.elt<uint8_t>(rd_num + fn * vlmul, vreg_inx) =
- is_valid ? MMU.load_uint8(baseAddr + (i * nf + fn) * 1) : 0;
- is_zero = is_valid && P.VU.elt<uint8_t>(rd_num + fn * vlmul, vreg_inx) == 0;
+ MMU.load_uint8(baseAddr + (i * nf + fn) * 1);
+ is_zero = P.VU.elt<uint8_t>(rd_num + fn * vlmul, vreg_inx) == 0;
break;
case e16:
P.VU.elt<uint16_t>(rd_num + fn * vlmul, vreg_inx) =
- is_valid ? MMU.load_uint16(baseAddr + (i * nf + fn) * 2) : 0;
- is_zero = is_valid && P.VU.elt<uint16_t>(rd_num + fn * vlmul, vreg_inx) == 0;
+ MMU.load_uint16(baseAddr + (i * nf + fn) * 2);
+ is_zero = P.VU.elt<uint16_t>(rd_num + fn * vlmul, vreg_inx) == 0;
break;
case e32:
P.VU.elt<uint32_t>(rd_num + fn * vlmul, vreg_inx) =
- is_valid ? MMU.load_uint32(baseAddr + (i * nf + fn) * 4) : 0;
- is_zero = is_valid && P.VU.elt<uint32_t>(rd_num + fn * vlmul, vreg_inx) == 0;
+ MMU.load_uint32(baseAddr + (i * nf + fn) * 4);
+ is_zero = P.VU.elt<uint32_t>(rd_num + fn * vlmul, vreg_inx) == 0;
break;
case e64:
P.VU.elt<uint64_t>(rd_num + fn * vlmul, vreg_inx) =
- is_valid ? MMU.load_uint64(baseAddr + (i * nf + fn) * 8) : 0;
- is_zero = is_valid && P.VU.elt<uint64_t>(rd_num + fn * vlmul, vreg_inx) == 0;
+ MMU.load_uint64(baseAddr + (i * nf + fn) * 8);
+ is_zero = P.VU.elt<uint64_t>(rd_num + fn * vlmul, vreg_inx) == 0;
break;
}
diff --git a/riscv/insns/vmsbf_m.h b/riscv/insns/vmsbf_m.h
index 3047cca..443fcbb 100644
--- a/riscv/insns/vmsbf_m.h
+++ b/riscv/insns/vmsbf_m.h
@@ -30,5 +30,4 @@ for (reg_t i = P.VU.vstart; i < vl; ++i) {
}
}
-VI_TAIL_ZERO_MASK(rd_num);
P.VU.vstart = 0;
diff --git a/riscv/insns/vmsif_m.h b/riscv/insns/vmsif_m.h
index 826e7cd..381088b 100644
--- a/riscv/insns/vmsif_m.h
+++ b/riscv/insns/vmsif_m.h
@@ -30,5 +30,4 @@ for (reg_t i = P.VU.vstart ; i < vl; ++i) {
}
}
-VI_TAIL_ZERO_MASK(rd_num);
P.VU.vstart = 0;
diff --git a/riscv/insns/vmsof_m.h b/riscv/insns/vmsof_m.h
index 48805f7..d66002d 100644
--- a/riscv/insns/vmsof_m.h
+++ b/riscv/insns/vmsof_m.h
@@ -28,5 +28,4 @@ for (reg_t i = P.VU.vstart ; i < vl; ++i) {
}
}
-VI_TAIL_ZERO_MASK(rd_num);
P.VU.vstart = 0;
diff --git a/riscv/insns/vmv_s_x.h b/riscv/insns/vmv_s_x.h
index f19fa61..948b5be 100644
--- a/riscv/insns/vmv_s_x.h
+++ b/riscv/insns/vmv_s_x.h
@@ -24,25 +24,5 @@ if (vl > 0) {
break;
}
- if (TAIL_ZEROING) {
- const reg_t max_len = P.VU.VLEN / sew;
- for (reg_t i = 1; i < max_len; ++i) {
- switch(sew) {
- case e8:
- P.VU.elt<uint8_t>(rd_num, i) = 0;
- break;
- case e16:
- P.VU.elt<uint16_t>(rd_num, i) = 0;
- break;
- case e32:
- P.VU.elt<uint32_t>(rd_num, i) = 0;
- break;
- default:
- P.VU.elt<uint64_t>(rd_num, i) = 0;
- break;
- }
- }
- }
-
vl = 0;
}
diff --git a/riscv/insns/vsuxb_v.h b/riscv/insns/vsuxb_v.h
index cf928f8..0dfe024 100644
--- a/riscv/insns/vsuxb_v.h
+++ b/riscv/insns/vsuxb_v.h
@@ -8,30 +8,25 @@ reg_t vs3 = insn.rd();
reg_t vlmax = P.VU.vlmax;
VI_DUPLICATE_VREG(stride, vlmax);
for (reg_t i = 0; i < vlmax && vl != 0; ++i) {
- bool is_valid = true;
VI_ELEMENT_SKIP(i);
VI_STRIP(i)
switch (P.VU.vsew) {
case e8:
- if (is_valid)
- MMU.store_uint8(baseAddr + index[i],
- P.VU.elt<uint8_t>(vs3, vreg_inx));
+ MMU.store_uint8(baseAddr + index[i],
+ P.VU.elt<uint8_t>(vs3, vreg_inx));
break;
case e16:
- if (is_valid)
- MMU.store_uint8(baseAddr + index[i],
- P.VU.elt<uint16_t>(vs3, vreg_inx));
+ MMU.store_uint8(baseAddr + index[i],
+ P.VU.elt<uint16_t>(vs3, vreg_inx));
break;
case e32:
- if (is_valid)
- MMU.store_uint8(baseAddr + index[i],
+ MMU.store_uint8(baseAddr + index[i],
P.VU.elt<uint32_t>(vs3, vreg_inx));
break;
case e64:
- if (is_valid)
- MMU.store_uint8(baseAddr + index[i],
- P.VU.elt<uint64_t>(vs3, vreg_inx));
+ MMU.store_uint8(baseAddr + index[i],
+ P.VU.elt<uint64_t>(vs3, vreg_inx));
break;
}
}
diff --git a/riscv/insns/vsuxe_v.h b/riscv/insns/vsuxe_v.h
index 8bd7545..5e4d3a2 100644
--- a/riscv/insns/vsuxe_v.h
+++ b/riscv/insns/vsuxe_v.h
@@ -9,30 +9,25 @@ reg_t vs3 = insn.rd();
reg_t vlmax = P.VU.vlmax;
VI_DUPLICATE_VREG(stride, vlmax);
for (reg_t i = 0; i < vlmax && vl != 0; ++i) {
- bool is_valid = true;
VI_ELEMENT_SKIP(i);
VI_STRIP(i)
switch (sew) {
case e8:
- if (is_valid)
- MMU.store_uint8(baseAddr + index[i],
- P.VU.elt<uint8_t>(vs3, vreg_inx));
+ MMU.store_uint8(baseAddr + index[i],
+ P.VU.elt<uint8_t>(vs3, vreg_inx));
break;
case e16:
- if (is_valid)
- MMU.store_uint16(baseAddr + index[i],
- P.VU.elt<uint16_t>(vs3, vreg_inx));
+ MMU.store_uint16(baseAddr + index[i],
+ P.VU.elt<uint16_t>(vs3, vreg_inx));
break;
case e32:
- if (is_valid)
- MMU.store_uint32(baseAddr + index[i],
- P.VU.elt<uint32_t>(vs3, vreg_inx));
+ MMU.store_uint32(baseAddr + index[i],
+ P.VU.elt<uint32_t>(vs3, vreg_inx));
break;
case e64:
- if (is_valid)
- MMU.store_uint64(baseAddr + index[i],
- P.VU.elt<uint64_t>(vs3, vreg_inx));
+ MMU.store_uint64(baseAddr + index[i],
+ P.VU.elt<uint64_t>(vs3, vreg_inx));
break;
}
}
diff --git a/riscv/insns/vsuxh_v.h b/riscv/insns/vsuxh_v.h
index 1d5a1bd..c6f8be4 100644
--- a/riscv/insns/vsuxh_v.h
+++ b/riscv/insns/vsuxh_v.h
@@ -8,25 +8,21 @@ reg_t vs3 = insn.rd();
reg_t vlmax = P.VU.vlmax;
VI_DUPLICATE_VREG(stride, vlmax);
for (reg_t i = 0; i < vlmax && vl != 0; ++i) {
- bool is_valid = true;
VI_ELEMENT_SKIP(i);
VI_STRIP(i)
switch (P.VU.vsew) {
case e16:
- if (is_valid)
- MMU.store_uint16(baseAddr + index[i],
- P.VU.elt<uint16_t>(vs3, vreg_inx));
+ MMU.store_uint16(baseAddr + index[i],
+ P.VU.elt<uint16_t>(vs3, vreg_inx));
break;
case e32:
- if (is_valid)
- MMU.store_uint16(baseAddr + index[i],
- P.VU.elt<uint32_t>(vs3, vreg_inx));
+ MMU.store_uint16(baseAddr + index[i],
+ P.VU.elt<uint32_t>(vs3, vreg_inx));
break;
case e64:
- if (is_valid)
- MMU.store_uint16(baseAddr + index[i],
- P.VU.elt<uint64_t>(vs3, vreg_inx));
+ MMU.store_uint16(baseAddr + index[i],
+ P.VU.elt<uint64_t>(vs3, vreg_inx));
break;
}
}
diff --git a/riscv/insns/vsuxw_v.h b/riscv/insns/vsuxw_v.h
index ec1a8fe..f133e77 100644
--- a/riscv/insns/vsuxw_v.h
+++ b/riscv/insns/vsuxw_v.h
@@ -8,20 +8,17 @@ reg_t vs3 = insn.rd();
reg_t vlmax = P.VU.vlmax;
VI_DUPLICATE_VREG(stride, vlmax);
for (reg_t i = 0; i < vlmax && vl != 0; ++i) {
- bool is_valid = true;
VI_ELEMENT_SKIP(i);
VI_STRIP(i)
switch (P.VU.vsew) {
case e32:
- if (is_valid)
- MMU.store_uint32(baseAddr + index[i],
- P.VU.elt<uint32_t>(vs3, vreg_inx));
+ MMU.store_uint32(baseAddr + index[i],
+ P.VU.elt<uint32_t>(vs3, vreg_inx));
break;
case e64:
- if (is_valid)
- MMU.store_uint32(baseAddr + index[i],
- P.VU.elt<uint64_t>(vs3, vreg_inx));
+ MMU.store_uint32(baseAddr + index[i],
+ P.VU.elt<uint64_t>(vs3, vreg_inx));
break;
}
}