author    YenHaoChen <howard25336284@gmail.com>  2024-07-31 13:36:22 +0800
committer YenHaoChen <howard25336284@gmail.com>  2024-07-31 15:11:10 +0800
commit    e9f620ffb53889be2de8997f2f48053160e3b5b6 (patch)
tree      2474f3d195c37b3230fd7c2dba636e6531ea9368 /riscv
parent    adacda49e067df0bc1fc23efd1d0e2b19ffedf61 (diff)
vector: Check if there is any vector extension before using vector CSRs
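
Every hunk below applies the same fix: the read of the vector rounding mode (the vxrm CSR, via P.VU.get_vround_mode()) moves from before the vector loop macro into its body. The loop macros verify that a vector extension is actually present before executing the body, so hoisting the CSR read above the macro meant touching vector state before that check ran. A minimal sketch of the ordering problem, using simplified stand-ins for the real Spike machinery (the VectorUnit model, the exception, and the loop shape are illustrative assumptions, not Spike's actual implementation):

#include <cstdio>
#include <stdexcept>

// Toy model of the vector unit -- an assumption for illustration only.
struct VectorUnit {
  bool enabled = false;  // whether any vector extension is configured
  int vxrm = 0;          // fixed-point rounding mode CSR
  int get_vround_mode() const {
    if (!enabled)
      throw std::runtime_error("vector CSR touched without vector extension");
    return vxrm;
  }
};

VectorUnit VU;

// Like Spike's VI_VV_LOOP, the extension check comes *first*, then the body.
#define VI_VV_LOOP(BODY)                                             \
  do {                                                               \
    if (!VU.enabled) { puts("raise illegal-instruction"); break; }   \
    for (int i = 0; i < 4; i++) BODY                                 \
  } while (0)

int main() {
  try {
    // Pre-patch ordering: the CSR read runs unconditionally, before the
    // macro's extension check -- it fires even on a core without vectors.
    int xrm = VU.get_vround_mode();
    VI_VV_LOOP({ (void)xrm; });
  } catch (const std::exception& e) {
    printf("pre-patch order failed: %s\n", e.what());
  }

  // Post-patch ordering: the read sits inside the body, after the check,
  // so the missing extension is reported cleanly and no CSR is touched.
  VI_VV_LOOP({ int xrm = VU.get_vround_mode(); (void)xrm; });
  return 0;
}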
Diffstat (limited to 'riscv')
-rw-r--r--  riscv/insns/vnclip_wi.h   | 6
-rw-r--r--  riscv/insns/vnclip_wv.h   | 6
-rw-r--r--  riscv/insns/vnclipu_wi.h  | 6
-rw-r--r--  riscv/insns/vnclipu_wv.h  | 6
-rw-r--r--  riscv/insns/vnclipu_wx.h  | 6
-rw-r--r--  riscv/insns/vsmul_vv.h    | 8
-rw-r--r--  riscv/insns/vsmul_vx.h    | 8
-rw-r--r--  riscv/insns/vssra_vi.h    | 2
-rw-r--r--  riscv/insns/vssra_vv.h    | 2
-rw-r--r--  riscv/insns/vssra_vx.h    | 2
-rw-r--r--  riscv/insns/vssrl_vi.h    | 2
-rw-r--r--  riscv/insns/vssrl_vv.h    | 2
-rw-r--r--  riscv/insns/vssrl_vx.h    | 2
-rw-r--r--  riscv/v_ext_macros.h      | 8
14 files changed, 33 insertions, 33 deletions
diff --git a/riscv/insns/vnclip_wi.h b/riscv/insns/vnclip_wi.h
index ea6898c..4805173 100644
--- a/riscv/insns/vnclip_wi.h
+++ b/riscv/insns/vnclip_wi.h
@@ -1,9 +1,9 @@
// vnclip: vd[i] = clip(round(vs2[i] + rnd) >> simm)
-VRM xrm = P.VU.get_vround_mode();
-int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
-int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
VI_VI_LOOP_NARROW
({
+ VRM xrm = P.VU.get_vround_mode();
+ int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
+ int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
int128_t result = vs2;
unsigned shift = zimm5 & ((sew * 2) - 1);
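
For reference, the saturation bounds these hunks compute derive the SEW-wide limits from the 64-bit ones: INT64_MAX >> (64 - vsew) keeps the top of the signed range at vsew bits, and the arithmetic shift of INT64_MIN sign-extends down to the minimum. A standalone check of that arithmetic (plain C++, not Spike code; right-shifting a negative value is arithmetic on mainstream compilers and guaranteed since C++20):

#include <cassert>
#include <cstdint>

int main() {
  const int sews[] = {8, 16, 32};
  for (int sew : sews) {
    int64_t int_max = INT64_MAX >> (64 - sew);  // e.g. 0x7f when sew == 8
    int64_t int_min = INT64_MIN >> (64 - sew);  // sign-extends: -0x80
    assert(int_max == (int64_t{1} << (sew - 1)) - 1);
    assert(int_min == -(int64_t{1} << (sew - 1)));
  }
  return 0;
}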
diff --git a/riscv/insns/vnclip_wv.h b/riscv/insns/vnclip_wv.h
index 63b84c6..1f7558a 100644
--- a/riscv/insns/vnclip_wv.h
+++ b/riscv/insns/vnclip_wv.h
@@ -1,9 +1,9 @@
// vnclip: vd[i] = clip(round(vs2[i] + rnd) >> vs1[i])
-VRM xrm = P.VU.get_vround_mode();
-int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
-int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
VI_VV_LOOP_NARROW
({
+ VRM xrm = P.VU.get_vround_mode();
+ int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
+ int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
int128_t result = vs2;
unsigned shift = vs1 & ((sew * 2) - 1);
diff --git a/riscv/insns/vnclipu_wi.h b/riscv/insns/vnclipu_wi.h
index 441a3a7..10735ba 100644
--- a/riscv/insns/vnclipu_wi.h
+++ b/riscv/insns/vnclipu_wi.h
@@ -1,9 +1,9 @@
// vnclipu: vd[i] = clip(round(vs2[i] + rnd) >> simm)
-VRM xrm = P.VU.get_vround_mode();
-uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew);
-uint64_t sign_mask = UINT64_MAX << P.VU.vsew;
VI_VI_LOOP_NARROW
({
+ VRM xrm = P.VU.get_vround_mode();
+ uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew);
+ uint64_t sign_mask = UINT64_MAX << P.VU.vsew;
uint128_t result = vs2_u;
unsigned shift = zimm5 & ((sew * 2) - 1);
diff --git a/riscv/insns/vnclipu_wv.h b/riscv/insns/vnclipu_wv.h
index 8072489..0e3e8b0 100644
--- a/riscv/insns/vnclipu_wv.h
+++ b/riscv/insns/vnclipu_wv.h
@@ -1,9 +1,9 @@
// vnclipu: vd[i] = clip(round(vs2[i] + rnd) >> vs1[i])
-VRM xrm = P.VU.get_vround_mode();
-uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew);
-uint64_t sign_mask = UINT64_MAX << P.VU.vsew;
VI_VV_LOOP_NARROW
({
+ VRM xrm = P.VU.get_vround_mode();
+ uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew);
+ uint64_t sign_mask = UINT64_MAX << P.VU.vsew;
uint128_t result = vs2_u;
unsigned shift = vs1 & ((sew * 2) - 1);
diff --git a/riscv/insns/vnclipu_wx.h b/riscv/insns/vnclipu_wx.h
index b2d91c3..d7c6bea 100644
--- a/riscv/insns/vnclipu_wx.h
+++ b/riscv/insns/vnclipu_wx.h
@@ -1,9 +1,9 @@
// vnclipu: vd[i] = clip(round(vs2[i] + rnd) >> rs1[i])
-VRM xrm = P.VU.get_vround_mode();
-uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew);
-uint64_t sign_mask = UINT64_MAX << P.VU.vsew;
VI_VX_LOOP_NARROW
({
+ VRM xrm = P.VU.get_vround_mode();
+ uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew);
+ uint64_t sign_mask = UINT64_MAX << P.VU.vsew;
uint128_t result = vs2_u;
unsigned shift = rs1 & ((sew * 2) - 1);
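
The unsigned variants use the same trick for the upper bound and add sign_mask = UINT64_MAX << vsew, which covers every bit above the element width; if the shifted result has any of those bits set, it cannot fit in SEW bits and is saturated to uint_max. A standalone illustration of that test (the literal result value is made up for the example):

#include <cstdint>
#include <cstdio>

int main() {
  const unsigned sew = 8;
  uint64_t uint_max  = UINT64_MAX >> (64 - sew);  // 0xff, largest 8-bit value
  uint64_t sign_mask = UINT64_MAX << sew;         // every bit above bit 7

  uint64_t result = 0x1a5;                 // 421: too wide for 8 bits
  if (result & sign_mask)                  // high bits set -> overflow
    result = uint_max;                     // saturate, as vnclipu does
  printf("clipped: 0x%llx\n", (unsigned long long)result);  // prints 0xff
  return 0;
}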
diff --git a/riscv/insns/vsmul_vv.h b/riscv/insns/vsmul_vv.h
index 49e42c1..c1d0a57 100644
--- a/riscv/insns/vsmul_vv.h
+++ b/riscv/insns/vsmul_vv.h
@@ -1,10 +1,10 @@
// vsmul.vv vd, vs2, vs1
-VRM xrm = P.VU.get_vround_mode();
-int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
-int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
-
VI_VV_LOOP
({
+ VRM xrm = P.VU.get_vround_mode();
+ int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
+ int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
+
bool overflow = vs1 == vs2 && vs1 == int_min;
int128_t result = (int128_t)vs1 * (int128_t)vs2;
diff --git a/riscv/insns/vsmul_vx.h b/riscv/insns/vsmul_vx.h
index d2724ee..c2e531c 100644
--- a/riscv/insns/vsmul_vx.h
+++ b/riscv/insns/vsmul_vx.h
@@ -1,10 +1,10 @@
// vsmul.vx vd, vs2, rs1
-VRM xrm = P.VU.get_vround_mode();
-int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
-int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
-
VI_VX_LOOP
({
+ VRM xrm = P.VU.get_vround_mode();
+ int64_t int_max = INT64_MAX >> (64 - P.VU.vsew);
+ int64_t int_min = INT64_MIN >> (64 - P.VU.vsew);
+
bool overflow = rs1 == vs2 && rs1 == int_min;
int128_t result = (int128_t)rs1 * (int128_t)vs2;
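
The vsmul hunks also carry an explicit overflow flag: the instruction computes (vs1 * vs2) >> (sew - 1) with rounding and saturation, and the one input pair that escapes the representable range is int_min * int_min, whose product shifts to exactly int_max + 1. A quick demonstration for sew = 8 (rounding omitted for brevity; __int128 is a GCC/Clang extension):

#include <cstdint>
#include <cstdio>

int main() {
  const int sew = 8;
  const int64_t int_max = INT64_MAX >> (64 - sew);  //  127
  const int64_t int_min = INT64_MIN >> (64 - sew);  // -128

  __int128 prod = (__int128)int_min * int_min;      // 16384 == 2^14
  int64_t shifted = (int64_t)(prod >> (sew - 1));   // 128 == int_max + 1
  printf("int_min * int_min >> %d = %lld, int_max = %lld\n",
         sew - 1, (long long)shifted, (long long)int_max);
  // hence the explicit check in the diff: on this pair, saturate to int_max
  return 0;
}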
diff --git a/riscv/insns/vssra_vi.h b/riscv/insns/vssra_vi.h
index ff2e1c5..64a41a7 100644
--- a/riscv/insns/vssra_vi.h
+++ b/riscv/insns/vssra_vi.h
@@ -1,7 +1,7 @@
// vssra.vi vd, vs2, simm5
-VRM xrm = P.VU.get_vround_mode();
VI_VI_LOOP
({
+ VRM xrm = P.VU.get_vround_mode();
int sh = simm5 & (sew - 1) & 0x1f;
int128_t val = vs2;
diff --git a/riscv/insns/vssra_vv.h b/riscv/insns/vssra_vv.h
index 7bbc766..babca47 100644
--- a/riscv/insns/vssra_vv.h
+++ b/riscv/insns/vssra_vv.h
@@ -1,7 +1,7 @@
// vssra.vv vd, vs2, vs1
-VRM xrm = P.VU.get_vround_mode();
VI_VV_LOOP
({
+ VRM xrm = P.VU.get_vround_mode();
int sh = vs1 & (sew - 1);
int128_t val = vs2;
diff --git a/riscv/insns/vssra_vx.h b/riscv/insns/vssra_vx.h
index 068a22b..3d70726 100644
--- a/riscv/insns/vssra_vx.h
+++ b/riscv/insns/vssra_vx.h
@@ -1,7 +1,7 @@
// vssra.vx vd, vs2, rs1
-VRM xrm = P.VU.get_vround_mode();
VI_VX_LOOP
({
+ VRM xrm = P.VU.get_vround_mode();
int sh = rs1 & (sew - 1);
int128_t val = vs2;
diff --git a/riscv/insns/vssrl_vi.h b/riscv/insns/vssrl_vi.h
index d125164..9990235 100644
--- a/riscv/insns/vssrl_vi.h
+++ b/riscv/insns/vssrl_vi.h
@@ -1,7 +1,7 @@
// vssra.vi vd, vs2, simm5
-VRM xrm = P.VU.get_vround_mode();
VI_VI_ULOOP
({
+ VRM xrm = P.VU.get_vround_mode();
int sh = zimm5 & (sew - 1) & 0x1f;
uint128_t val = vs2;
diff --git a/riscv/insns/vssrl_vv.h b/riscv/insns/vssrl_vv.h
index a8e5d16..f8924ba 100644
--- a/riscv/insns/vssrl_vv.h
+++ b/riscv/insns/vssrl_vv.h
@@ -1,7 +1,7 @@
// vssrl.vv vd, vs2, vs1
-VRM xrm = P.VU.get_vround_mode();
VI_VV_ULOOP
({
+ VRM xrm = P.VU.get_vround_mode();
int sh = vs1 & (sew - 1);
uint128_t val = vs2;
diff --git a/riscv/insns/vssrl_vx.h b/riscv/insns/vssrl_vx.h
index ee3cb34..04468d5 100644
--- a/riscv/insns/vssrl_vx.h
+++ b/riscv/insns/vssrl_vx.h
@@ -1,7 +1,7 @@
// vssrl.vx vd, vs2, rs1
-VRM xrm = P.VU.get_vround_mode();
VI_VX_ULOOP
({
+ VRM xrm = P.VU.get_vround_mode();
int sh = rs1 & (sew - 1);
uint128_t val = vs2;
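
In all six vssra/vssrl variants the shift amount is reduced modulo SEW by masking with sew - 1 (SEW is a power of two, so this is a low-bit mask); the immediate forms mask with 0x1f as well, since the encoding provides only five bits. A small check of that masking:

#include <cassert>

int main() {
  const int sew = 16;
  assert((17 & (sew - 1)) == 1);   // 17 mod 16
  assert((40 & (sew - 1)) == 8);   // 40 mod 16
  // the .vi forms additionally clamp to the 5-bit immediate range
  assert(((40 & (sew - 1)) & 0x1f) == 8);
  return 0;
}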
diff --git a/riscv/v_ext_macros.h b/riscv/v_ext_macros.h
index efec7a2..b6365aa 100644
--- a/riscv/v_ext_macros.h
+++ b/riscv/v_ext_macros.h
@@ -1144,32 +1144,32 @@ static inline bool is_overlapped_widen(const int astart, int asize,
// average loop
#define VI_VV_LOOP_AVG(op) \
-VRM xrm = p->VU.get_vround_mode(); \
VI_VV_LOOP({ \
+ VRM xrm = p->VU.get_vround_mode(); \
uint128_t res = ((uint128_t)vs2) op vs1; \
INT_ROUNDING(res, xrm, 1); \
vd = res >> 1; \
})
#define VI_VX_LOOP_AVG(op) \
-VRM xrm = p->VU.get_vround_mode(); \
VI_VX_LOOP({ \
+ VRM xrm = p->VU.get_vround_mode(); \
uint128_t res = ((uint128_t)vs2) op rs1; \
INT_ROUNDING(res, xrm, 1); \
vd = res >> 1; \
})
#define VI_VV_ULOOP_AVG(op) \
-VRM xrm = p->VU.get_vround_mode(); \
VI_VV_ULOOP({ \
+ VRM xrm = p->VU.get_vround_mode(); \
uint128_t res = ((uint128_t)vs2) op vs1; \
INT_ROUNDING(res, xrm, 1); \
vd = res >> 1; \
})
#define VI_VX_ULOOP_AVG(op) \
-VRM xrm = p->VU.get_vround_mode(); \
VI_VX_ULOOP({ \
+ VRM xrm = p->VU.get_vround_mode(); \
uint128_t res = ((uint128_t)vs2) op rs1; \
INT_ROUNDING(res, xrm, 1); \
vd = res >> 1; \
})
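
These averaging macros widen the operands to 128 bits before the add or subtract, apply the rounding increment, then halve; the wide intermediate keeps the sum itself from overflowing the element width. A sketch of the round-then-halve step for one rounding mode (round-to-nearest-up, the xrm == 0 case; the real INT_ROUNDING macro handles four modes, so this is a simplified analogue):

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t a = 201, b = 100;
  unsigned res = (unsigned)a + b;    // 301: widened, so no 8-bit overflow
  int xrm = 0;                       // 0 == rnu, round-to-nearest-up
  if (xrm == 0) res += 1;            // the INT_ROUNDING(res, xrm, 1) increment
  uint8_t avg = (uint8_t)(res >> 1);
  printf("avg = %u\n", avg);         // 151, i.e. 150.5 rounded up
  return 0;
}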