aboutsummaryrefslogtreecommitdiff
path: root/riscv/insns/vbrev_v.h
diff options
context:
space:
mode:
authorEric Gouriou <ego@rivosinc.com>2023-06-01 18:06:55 -0700
committerEric Gouriou <ego@rivosinc.com>2023-06-19 14:30:32 -0700
commite87038ee5e6545a5149cdf4334d220f951534f30 (patch)
treeb2224a87df4346e9f5b0909058a08e4f9be2aeef /riscv/insns/vbrev_v.h
parentd5c0339484323b5a9498576d70ec90eab2e13438 (diff)
downloadriscv-isa-sim-e87038ee5e6545a5149cdf4334d220f951534f30.zip
riscv-isa-sim-e87038ee5e6545a5149cdf4334d220f951534f30.tar.gz
riscv-isa-sim-e87038ee5e6545a5149cdf4334d220f951534f30.tar.bz2
Zvk: Implement Zvbb, Vector Bit-manipulation for Cryptography
Implement the proposed instructions in Zvbb: - vandn.{vv,vx}, vector bitwise and-not - vbrev.v, vector bit reverse in element - vbrev8.v, vector bit reverse in bytes - vrev8.v, vector byte reverse - vctz.v, vector count trailing zeros - vclz.v, vector count leading zeros - vcpop.v, vector population count - vrol.{vv,vx}, vector rotate left - vror.{vi,vv,vx}, vector rotate right - vwsll.{vi,vv,vx} vector widening shift left logical A new instruction field, 'zimm6', is introduced, encoded in bits [15, 19] and bit [26]. It is used by "vror.vi" to encode a shift immediate in [0, 63]. Co-authored-by: Raghav Gupta <rgupta@rivosinc.com> Co-authored-by: Stanislaw Kardach <kda@semihalf.com> Signed-off-by: Eric Gouriou <ego@rivosinc.com>
Diffstat (limited to 'riscv/insns/vbrev_v.h')
-rw-r--r--riscv/insns/vbrev_v.h24
1 files changed, 24 insertions, 0 deletions
diff --git a/riscv/insns/vbrev_v.h b/riscv/insns/vbrev_v.h
new file mode 100644
index 0000000..7f784c2
--- /dev/null
+++ b/riscv/insns/vbrev_v.h
@@ -0,0 +1,24 @@
+// vbrev.v vd, vs2
+
+#include "zvk_ext_macros.h"
+
+require_zvbb;
+
+VI_V_ULOOP
+({
+ reg_t x = vs2;
+
+ // Reverse bits in bytes (vbrev8)
+ x = ((x & 0x5555555555555555llu) << 1) | ((x & 0xAAAAAAAAAAAAAAAAllu) >> 1);
+ x = ((x & 0x3333333333333333llu) << 2) | ((x & 0xCCCCCCCCCCCCCCCCllu) >> 2);
+ x = ((x & 0x0F0F0F0F0F0F0F0Fllu) << 4) | ((x & 0xF0F0F0F0F0F0F0F0llu) >> 4);
+ // Re-order bytes (vrev8)
+ if (P.VU.vsew > 8)
+ x = ((x & 0x00FF00FF00FF00FFllu) << 8) | ((x & 0xFF00FF00FF00FF00llu) >> 8);
+ if (P.VU.vsew > 16)
+ x = ((x & 0x0000FFFF0000FFFFllu) << 16) | ((x & 0xFFFF0000FFFF0000llu) >> 16);
+ if (P.VU.vsew > 32)
+ x = ((x & 0x00000000FFFFFFFFllu) << 32) | ((x & 0xFFFFFFFF00000000llu) >> 32);
+
+ vd = x;
+})