aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEric Gouriou <ego@rivosinc.com>2023-05-01 21:57:16 -0700
committerEric Gouriou <ego@rivosinc.com>2023-05-01 21:57:16 -0700
commit199e646f2a8e36533c92280a5fba0d259ba7fbfb (patch)
treecb37a856a49ecb27592a0e6e2d8f23155627436e
parentbb64568616b364e7103910f5dd363bc9cad1bb0a (diff)
downloadriscv-opcodes-199e646f2a8e36533c92280a5fba0d259ba7fbfb.zip
riscv-opcodes-199e646f2a8e36533c92280a5fba0d259ba7fbfb.tar.gz
riscv-opcodes-199e646f2a8e36533c92280a5fba0d259ba7fbfb.tar.bz2
Support for Zvk, Vector Cryptography Extensions
Add encodings for all instructions in the Zvk extensions: - Zvbb, Vector Bit-manipulation instructions used in Cryptography, - Zvbc, Vector Carryless Multiplication - Zvkg, Vector GCM/GMAC instruction for Cryptography, - Zvkned, NIST Suite: Vector AES Encryption & Decryption (Single Round), - Zvknha, Zvknhb, NIST Suite: Vector SHA-2, - Zvksed, ShangMi Suite: SM4 Block Cipher Instructions - Zvksh, ShangMi Suite: SM3 Hash Function Instructions Add two "shorthand" extensions: - Zvkn: NIST Suite, imports Zvbb, Zvbc, Zvkned, and Zvknh - Zvks: ShangMi Suite, imports Zvbb, Zvbc, Zvksed, and Zvksh Three new fields are listed in constants.py: - 'zimm5', used to encode round constants (Zvkned, Zvksed, Zvksh), and 5-bit shift constant (vwsll.vi in Zvbb) - 'zimm6hi, zimm6lo', used to encode the 6-bit rotate amount in vror.vi. The Zvk instructions – with the exception of Zvbb, Zvbc – reside in the P opcode space. Some encodings conflict with proposed instructions in the P extension (packed SIMD). Zvk and P are exclusive of each other, no implementation will implement both. Conflicting P instructions are marked as pseudo of the Zvk instructions. The encodings match the current documentation of the specification at <https://github.com/riscv/riscv-crypto/tree/master/doc/vector>, at Version v0.9.1, 25 April, 2023 (Freeze Candidate). Co-authored-by: Eric Gouriou <ego@rivosinc.com> Co-authored-by: Stanislaw Kardach <kda@semihalf.com> Co-authored-by: Kornel Duleba <mindal@semihalf.com> Co-authored-by: Raghav Gupta <rgupta@rivosinc.com> Signed-off-by: Eric Gouriou <ego@rivosinc.com>
-rw-r--r--constants.py25
-rwxr-xr-xparse.py5
-rw-r--r--unratified/rv64_zpn6
-rw-r--r--unratified/rv_zpn2
-rw-r--r--unratified/rv_zvbb37
-rw-r--r--unratified/rv_zvbc9
-rw-r--r--unratified/rv_zvkg8
-rw-r--r--unratified/rv_zvkn46
-rw-r--r--unratified/rv_zvkned21
-rw-r--r--unratified/rv_zvknha9
-rw-r--r--unratified/rv_zvknhb9
-rw-r--r--unratified/rv_zvks34
-rw-r--r--unratified/rv_zvksed8
-rw-r--r--unratified/rv_zvksh7
14 files changed, 218 insertions, 8 deletions
diff --git a/constants.py b/constants.py
index 1594677..8d9352f 100644
--- a/constants.py
+++ b/constants.py
@@ -558,8 +558,11 @@ arg_lut['wd'] = (26, 26)
arg_lut['amoop'] = (31, 27)
arg_lut['nf'] = (31, 29)
arg_lut['simm5'] = (19, 15)
+arg_lut['zimm5'] = (19, 15)
arg_lut['zimm10'] = (29, 20)
arg_lut['zimm11'] = (30, 20)
+arg_lut['zimm6hi'] = (26, 26)
+arg_lut['zimm6lo'] = (19, 15)
#compressed immediates and fields
@@ -642,6 +645,8 @@ latex_mapping['rd_n0'] = 'rd$\\neq$0'
latex_mapping['rs1_n0'] = 'rs1$\\neq$0'
latex_mapping['c_rs1_n0'] = 'rs1$\\neq$0'
latex_mapping['rd_rs1'] = 'rd/rs1'
+latex_mapping['zimm6hi'] = 'uimm[5]'
+latex_mapping['zimm6lo'] = 'uimm[4:0]'
latex_mapping['c_nzuimm10'] = "nzuimm[5:4$\\vert$9:6$\\vert$2$\\vert$3]"
latex_mapping['c_uimm7lo'] = 'uimm[2$\\vert$6]'
latex_mapping['c_uimm7hi'] = 'uimm[5:3]'
@@ -702,3 +707,23 @@ latex_fixed_fields.append((19,15))
latex_fixed_fields.append((14,12))
latex_fixed_fields.append((11,7))
latex_fixed_fields.append((6,0))
+
+# Pseudo-ops present in the generated encodings.
+# By default pseudo-ops are not listed as they are considered aliases
+# of their base instruction.
+emitted_pseudo_ops = [
+ 'pause',
+ 'prefetch_i',
+ 'prefetch_r',
+ 'prefetch_w',
+ 'rstsa16',
+ 'rstsa32',
+ 'srli32_u',
+ 'slli_rv128',
+ 'slli_rv32',
+ 'srai_rv128',
+ 'srai_rv32',
+ 'srli_rv128',
+ 'srli_rv32',
+ 'umax32',
+]
diff --git a/parse.py b/parse.py
index 47846b7..e78a158 100755
--- a/parse.py
+++ b/parse.py
@@ -958,10 +958,7 @@ if __name__ == "__main__":
if '-c' in sys.argv[1:]:
instr_dict_c = create_inst_dict(extensions, False,
- include_pseudo_ops=['pause', 'prefetch_r', 'prefetch_w', 'prefetch_i',
- 'slli_rv32', 'srli_rv32', 'srai_rv32',
- 'slli_rv128', 'srli_rv128', 'srai_rv128',
- ])
+ include_pseudo_ops=emitted_pseudo_ops)
instr_dict_c = collections.OrderedDict(sorted(instr_dict_c.items()))
make_c(instr_dict_c)
logging.info('encoding.out.h generated successfully')
diff --git a/unratified/rv64_zpn b/unratified/rv64_zpn
index c90bf1e..fa484fa 100644
--- a/unratified/rv64_zpn
+++ b/unratified/rv64_zpn
@@ -27,7 +27,7 @@ urstas32 31..25=0b1101000 rs2 rs1 14..12=0b010 rd 6..0=0b1
kstas32 31..25=0b1100000 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
ukstas32 31..25=0b1110000 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
stsa32 31..25=0b1111001 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
-rstsa32 31..25=0b1011001 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
+$pseudo_op rv_zvkg::vghsh.vv rstsa32 31..25=0b1011001 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
urstsa32 31..25=0b1101001 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
kstsa32 31..25=0b1100001 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
ukstsa32 31..25=0b1110001 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
@@ -40,7 +40,7 @@ srai32.u 31..25=0b1000000 imm5 rs1 14..12=0b010 rd 6..0=0b1
srl32 31..25=0b0101001 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
srli32 31..25=0b0111001 imm5 rs1 14..12=0b010 rd 6..0=0b1110111
srl32.u 31..25=0b0110001 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
-srli32.u 31..25=0b1000001 imm5 rs1 14..12=0b010 rd 6..0=0b1110111
+$pseudo_op rv_zvksh::vsm3me.vv srli32.u 31..25=0b1000001 imm5 rs1 14..12=0b010 rd 6..0=0b1110111
sll32 31..25=0b0101010 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
slli32 31..25=0b0111010 imm5 rs1 14..12=0b010 rd 6..0=0b1110111
ksll32 31..25=0b0110010 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
@@ -50,7 +50,7 @@ kslra32.u 31..25=0b0110011 rs2 rs1 14..12=0b010 rd 6..0=0b1
smin32 31..25=0b1001000 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
umin32 31..25=0b1010000 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
smax32 31..25=0b1001001 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
-umax32 31..25=0b1010001 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
+$pseudo_op rv_zvkned::vaesdf.vv umax32 31..25=0b1010001 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
khmbb16 31..25=0b1101110 rs2 rs1 14..12=0b001 rd 6..0=0b1110111
khmbt16 31..25=0b1110110 rs2 rs1 14..12=0b001 rd 6..0=0b1110111
khmtt16 31..25=0b1111110 rs2 rs1 14..12=0b001 rd 6..0=0b1110111
diff --git a/unratified/rv_zpn b/unratified/rv_zpn
index af26492..29ab4fd 100644
--- a/unratified/rv_zpn
+++ b/unratified/rv_zpn
@@ -96,7 +96,7 @@ raddw 31..25=0b0010000 rs2 rs1 14..12=0b001
rcras16 31..25=0b0000010 rs2 rs1 14..12=0b000 rd 6..0=0b1110111
rcrsa16 31..25=0b0000011 rs2 rs1 14..12=0b000 rd 6..0=0b1110111
rstas16 31..25=0b1011010 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
-rstsa16 31..25=0b1011011 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
+$pseudo_op rv_zvknha::vsha2ms.vv rstsa16 31..25=0b1011011 rs2 rs1 14..12=0b010 rd 6..0=0b1110111
rsub16 31..25=0b0000001 rs2 rs1 14..12=0b000 rd 6..0=0b1110111
rsub8 31..25=0b0000101 rs2 rs1 14..12=0b000 rd 6..0=0b1110111
rsubw 31..25=0b0010001 rs2 rs1 14..12=0b001 rd 6..0=0b1110111
diff --git a/unratified/rv_zvbb b/unratified/rv_zvbb
new file mode 100644
index 0000000..dc48ee2
--- /dev/null
+++ b/unratified/rv_zvbb
@@ -0,0 +1,37 @@
+# Zvbb - Vector Bit-manipulation used in Cryptography
+
+# Vector And-Not
+vandn.vv 31..26=0x01 vm vs2 vs1 14..12=0x0 vd 6..0=0x57
+vandn.vx 31..26=0x01 vm vs2 rs1 14..12=0x4 vd 6..0=0x57
+
+# Vector Reverse Bits in Elements
+vbrev.v 31..26=0x12 vm vs2 19..15=0xA 14..12=0x2 vd 6..0=0x57
+
+# Vector Reverse Bits in Bytes
+vbrev8.v 31..26=0x12 vm vs2 19..15=0x8 14..12=0x2 vd 6..0=0x57
+
+# Vector Reverse Bytes
+vrev8.v 31..26=0x12 vm vs2 19..15=0x9 14..12=0x2 vd 6..0=0x57
+
+# Vector Count Leading Zeros
+vclz.v 31..26=0x12 vm vs2 19..15=0xC 14..12=0x2 vd 6..0=0x57
+
+# Vector Count Trailing Zeros
+vctz.v 31..26=0x12 vm vs2 19..15=0xD 14..12=0x2 vd 6..0=0x57
+
+# Vector Population Count
+vcpop.v 31..26=0x12 vm vs2 19..15=0xE 14..12=0x2 vd 6..0=0x57
+
+# Vector Rotate Left
+vrol.vv 31..26=0x15 vm vs2 vs1 14..12=0x0 vd 6..0=0x57
+vrol.vx 31..26=0x15 vm vs2 rs1 14..12=0x4 vd 6..0=0x57
+
+# Vector Rotate Right
+vror.vv 31..26=0x14 vm vs2 vs1 14..12=0x0 vd 6..0=0x57
+vror.vx 31..26=0x14 vm vs2 rs1 14..12=0x4 vd 6..0=0x57
+vror.vi 31..27=0xa zimm6hi vm vs2 zimm6lo 14..12=0x3 vd 6..0=0x57
+
+# Vector Widening Shift Left Logical
+vwsll.vv 31..26=0x35 vm vs2 vs1 14..12=0x0 vd 6..0=0x57
+vwsll.vx 31..26=0x35 vm vs2 rs1 14..12=0x4 vd 6..0=0x57
+vwsll.vi 31..26=0x35 vm vs2 zimm5 14..12=0x3 vd 6..0=0x57
diff --git a/unratified/rv_zvbc b/unratified/rv_zvbc
new file mode 100644
index 0000000..95bf431
--- /dev/null
+++ b/unratified/rv_zvbc
@@ -0,0 +1,9 @@
+# Zvbc - Vector Carryless Multiplication
+
+# Carryless Multiply
+vclmul.vv 31..26=0x0C vm vs2 vs1 14..12=0x2 vd 6..0=0x57
+vclmul.vx 31..26=0x0C vm vs2 rs1 14..12=0x6 vd 6..0=0x57
+
+# Carryless Multiply (High)
+vclmulh.vv 31..26=0x0D vm vs2 vs1 14..12=0x2 vd 6..0=0x57
+vclmulh.vx 31..26=0x0D vm vs2 rs1 14..12=0x6 vd 6..0=0x57
diff --git a/unratified/rv_zvkg b/unratified/rv_zvkg
new file mode 100644
index 0000000..ed7bf32
--- /dev/null
+++ b/unratified/rv_zvkg
@@ -0,0 +1,8 @@
+# Zvkg - Vector GCM/GMAC
+
+# Vector Multiply over GHASH Galois-Field
+vgmul.vv 31..26=0x28 25=1 vs2 19..15=0x11 14..12=0x2 vd 6..0=0x77
+
+# Vector Add-Multiply over GHASH Galois-Field
+vghsh.vv 31..26=0x2C 25=1 vs2 vs1 14..12=0x2 vd 6..0=0x77
+
diff --git a/unratified/rv_zvkn b/unratified/rv_zvkn
new file mode 100644
index 0000000..5a17e6d
--- /dev/null
+++ b/unratified/rv_zvkn
@@ -0,0 +1,46 @@
+# Zvkn, Vector Crypto Extension, NIST Algorithm Suite
+
+# Import Zvbb
+$import rv_zvbb::vandn.vv
+$import rv_zvbb::vandn.vx
+$import rv_zvbb::vbrev.v
+$import rv_zvbb::vbrev8.v
+$import rv_zvbb::vrev8.v
+$import rv_zvbb::vrol.vv
+$import rv_zvbb::vrol.vx
+$import rv_zvbb::vror.vv
+$import rv_zvbb::vror.vx
+$import rv_zvbb::vror.vi
+$import rv_zvbb::vclz.v
+$import rv_zvbb::vctz.v
+$import rv_zvbb::vcpop.v
+$import rv_zvbb::vwsll.vv
+$import rv_zvbb::vwsll.vx
+$import rv_zvbb::vwsll.vi
+
+# Import Zvbc
+$import rv_zvbc::vclmul.vv
+$import rv_zvbc::vclmul.vx
+$import rv_zvbc::vclmulh.vv
+$import rv_zvbc::vclmulh.vx
+
+# Import Zvkned
+$import rv_zvkned::vaesef.vs
+$import rv_zvkned::vaesef.vv
+$import rv_zvkned::vaesem.vs
+$import rv_zvkned::vaesem.vv
+$import rv_zvkned::vaesdf.vs
+$import rv_zvkned::vaesdf.vv
+$import rv_zvkned::vaesdm.vs
+$import rv_zvkned::vaesdm.vv
+$import rv_zvkned::vaeskf1.vi
+$import rv_zvkned::vaeskf2.vi
+$import rv_zvkned::vaesz.vs
+
+# Import Zvknh.
+# "Zvkn" implies "Zvknhb". We import the instructions from 'rv_zvknha',
+# because we cannot import already imported instructions, 'rv_zvknhb'
+# imports them from 'rv_zvknha', and the instructions are identical.
+$import rv_zvknha::vsha2ms.vv
+$import rv_zvknha::vsha2ch.vv
+$import rv_zvknha::vsha2cl.vv
diff --git a/unratified/rv_zvkned b/unratified/rv_zvkned
new file mode 100644
index 0000000..572b465
--- /dev/null
+++ b/unratified/rv_zvkned
@@ -0,0 +1,21 @@
+# Zvkned - Vector Crypto AES Encryption & Decryption (Single Round)
+
+# AES Single Round Decryption
+vaesdf.vv 31..26=0x28 25=1 vs2 19..15=0x1 14..12=0x2 vd 6..0=0x77
+vaesdf.vs 31..26=0x29 25=1 vs2 19..15=0x1 14..12=0x2 vd 6..0=0x77
+vaesdm.vv 31..26=0x28 25=1 vs2 19..15=0x0 14..12=0x2 vd 6..0=0x77
+vaesdm.vs 31..26=0x29 25=1 vs2 19..15=0x0 14..12=0x2 vd 6..0=0x77
+
+# AES Single Round Encryption
+vaesef.vv 31..26=0x28 25=1 vs2 19..15=0x3 14..12=0x2 vd 6..0=0x77
+vaesef.vs 31..26=0x29 25=1 vs2 19..15=0x3 14..12=0x2 vd 6..0=0x77
+vaesem.vv 31..26=0x28 25=1 vs2 19..15=0x2 14..12=0x2 vd 6..0=0x77
+vaesem.vs 31..26=0x29 25=1 vs2 19..15=0x2 14..12=0x2 vd 6..0=0x77
+
+# AES Scalar Round Zero Encryption/Decryption
+vaesz.vs 31..26=0x29 25=1 vs2 19..15=0x7 14..12=0x2 vd 6..0=0x77
+
+# AES-128 Forward Key Schedule
+vaeskf1.vi 31..26=0x22 25=1 vs2 zimm5 14..12=0x2 vd 6..0=0x77
+# AES-256 Forward Key Schedule
+vaeskf2.vi 31..26=0x2A 25=1 vs2 zimm5 14..12=0x2 vd 6..0=0x77
diff --git a/unratified/rv_zvknha b/unratified/rv_zvknha
new file mode 100644
index 0000000..a09a36c
--- /dev/null
+++ b/unratified/rv_zvknha
@@ -0,0 +1,9 @@
+# Zvknha - Vector Crypto SHA-256 Secure Hash
+#
+# The following 3 instructions are defined in both Zvknha and Zvknhb:
+# - in Zvknha, they support SHA-256 (SEW=32) only,
+# - in Zvknhb, they support both SHA-256 (SEW=32) and SHA-512 (SEW=64).
+
+vsha2ms.vv 31..26=0x2D 25=1 vs2 vs1 14..12=0x2 vd 6..0=0x77
+vsha2ch.vv 31..26=0x2E 25=1 vs2 vs1 14..12=0x2 vd 6..0=0x77
+vsha2cl.vv 31..26=0x2F 25=1 vs2 vs1 14..12=0x2 vd 6..0=0x77
diff --git a/unratified/rv_zvknhb b/unratified/rv_zvknhb
new file mode 100644
index 0000000..c0b0d8f
--- /dev/null
+++ b/unratified/rv_zvknhb
@@ -0,0 +1,9 @@
+# Zvknhb - Vector Crypto SHA-256 and SHA-512 Secure Hash
+#
+# The following 3 instructions are defined in both Zvknha and Zvknhb:
+# - in Zvknha, they support SHA-256 (SEW=32) only,
+# - in Zvknhb, they support both SHA-256 (SEW=32) and SHA-512 (SEW=64).
+
+$import rv_zvknha::vsha2ms.vv
+$import rv_zvknha::vsha2ch.vv
+$import rv_zvknha::vsha2cl.vv
diff --git a/unratified/rv_zvks b/unratified/rv_zvks
new file mode 100644
index 0000000..b5448bf
--- /dev/null
+++ b/unratified/rv_zvks
@@ -0,0 +1,34 @@
+# Zvks, Vector Crypto Extension, ShangMi Algorithm Suite
+
+# Import Zvbb
+$import rv_zvbb::vandn.vv
+$import rv_zvbb::vandn.vx
+$import rv_zvbb::vbrev.v
+$import rv_zvbb::vbrev8.v
+$import rv_zvbb::vrev8.v
+$import rv_zvbb::vrol.vv
+$import rv_zvbb::vrol.vx
+$import rv_zvbb::vror.vv
+$import rv_zvbb::vror.vx
+$import rv_zvbb::vror.vi
+$import rv_zvbb::vclz.v
+$import rv_zvbb::vctz.v
+$import rv_zvbb::vcpop.v
+$import rv_zvbb::vwsll.vv
+$import rv_zvbb::vwsll.vx
+$import rv_zvbb::vwsll.vi
+
+# Import Zvbc
+$import rv_zvbc::vclmul.vv
+$import rv_zvbc::vclmul.vx
+$import rv_zvbc::vclmulh.vv
+$import rv_zvbc::vclmulh.vx
+
+# Import Zvksed
+$import rv_zvksed::vsm4k.vi
+$import rv_zvksed::vsm4r.vv
+$import rv_zvksed::vsm4r.vs
+
+# Import Zvksh
+$import rv_zvksh::vsm3c.vi
+$import rv_zvksh::vsm3me.vv
diff --git a/unratified/rv_zvksed b/unratified/rv_zvksed
new file mode 100644
index 0000000..b0b3037
--- /dev/null
+++ b/unratified/rv_zvksed
@@ -0,0 +1,8 @@
+# Zvksed - Vector Crypto SM4 (Block Cipher)
+
+# SM4 Key Expansion
+vsm4k.vi 31..26=0x21 25=1 vs2 zimm5 14..12=0x2 vd 6..0=0x77
+
+# SM4 Encryption/Decryption Rounds
+vsm4r.vv 31..26=0x28 25=1 vs2 19..15=0x10 14..12=0x2 vd 6..0=0x77
+vsm4r.vs 31..26=0x29 25=1 vs2 19..15=0x10 14..12=0x2 vd 6..0=0x77
diff --git a/unratified/rv_zvksh b/unratified/rv_zvksh
new file mode 100644
index 0000000..2dc6f6c
--- /dev/null
+++ b/unratified/rv_zvksh
@@ -0,0 +1,7 @@
+# Zvksh - Vector Crypto SM3 (Hash)
+
+# SM3 Message Compression
+vsm3c.vi 31..26=0x2B 25=1 vs2 zimm5 14..12=0x2 vd 6..0=0x77
+
+# SM3 Message Expansion
+vsm3me.vv 31..26=0x20 25=1 vs2 vs1 14..12=0x2 vd 6..0=0x77