author    Eric Gouriou <ego@rivosinc.com>  2023-06-01 18:07:38 -0700
committer Eric Gouriou <ego@rivosinc.com>  2023-06-19 14:30:35 -0700
commit    eadb0e1129c23e709b0565740f0fc1a3359de7b7 (patch)
tree      31e8a5fe6fa88892be4a330f8be2739d4d243119
parent    00873aa61acae4a17c1d269cddf1885e83b50102 (diff)
Zvk: Implement Zvkned, vector AES single round
Implement the Zvkned extension, "NIST Suite: Vector AES Encryption &
Decryption (Single Round)".
- vaeskf1.vi: AES forward key scheduling, AES-128.
- vaeskf2.vi: AES forward key scheduling, AES-256.
- vaesz.vs: AES encryption/decryption, 0-th round.
- vaesdm.{vs,vv}: AES decryption, middle rounds.
- vaesdf.{vs,vv}: AES decryption, final round.
- vaesem.{vs,vv}: AES encryption, middle rounds.
- vaesef.{vs,vv}: AES encryption, final round.

An extension-specific header containing common logic is added.

Co-authored-by: Stanislaw Kardach <kda@semihalf.com>
Signed-off-by: Eric Gouriou <ego@rivosinc.com>
-rw-r--r--  riscv/insns/vaesdf_vs.h     43
-rw-r--r--  riscv/insns/vaesdf_vv.h     37
-rw-r--r--  riscv/insns/vaesdm_vs.h     44
-rw-r--r--  riscv/insns/vaesdm_vv.h     38
-rw-r--r--  riscv/insns/vaesef_vs.h     43
-rw-r--r--  riscv/insns/vaesef_vv.h     37
-rw-r--r--  riscv/insns/vaesem_vs.h     44
-rw-r--r--  riscv/insns/vaesem_vv.h     38
-rw-r--r--  riscv/insns/vaeskf1_vi.h    65
-rw-r--r--  riscv/insns/vaeskf2_vi.h    89
-rw-r--r--  riscv/insns/vaesz_vs.h      24
-rw-r--r--  riscv/riscv.mk.in           14
-rw-r--r--  riscv/zvkned_ext_macros.h  270
13 files changed, 786 insertions(+), 0 deletions(-)
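Taken together, the new instructions cover the full FIPS-197 round structure: vaesz.vs performs the initial AddRoundKey, vaesem/vaesef the encryption middle/final rounds, vaesdm/vaesdf their decryption counterparts, and vaeskf1/vaeskf2 the forward key schedule. A minimal scalar C++ sketch of the encryption flow that vaesz.vs, vaesem.vv and vaesef.vv implement per 128-bit element group; the sub_bytes/shift_rows/mix_columns/add_round_key helpers are hypothetical stand-ins for the VAES_* macros introduced below:

#include <cstdint>

// Hypothetical scalar helpers mirroring the VAES_* macros in
// riscv/zvkned_ext_macros.h (declarations only).
void sub_bytes(uint8_t state[16]);
void shift_rows(uint8_t state[16]);
void mix_columns(uint8_t state[16]);
void add_round_key(uint8_t state[16], const uint8_t key[16]);

// AES-128 encryption of one block, phrased as the Zvkned instructions
// would compute it on each element group.
void aes128_encrypt(uint8_t state[16], const uint8_t round_keys[11][16]) {
  add_round_key(state, round_keys[0]);        // vaesz.vs
  for (int rnd = 1; rnd <= 9; ++rnd) {        // vaesem.vv, middle rounds
    sub_bytes(state);
    shift_rows(state);
    mix_columns(state);
    add_round_key(state, round_keys[rnd]);
  }
  sub_bytes(state);                           // vaesef.vv, final round:
  shift_rows(state);                          // MixColumns is skipped
  add_round_key(state, round_keys[10]);
}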
diff --git a/riscv/insns/vaesdf_vs.h b/riscv/insns/vaesdf_vs.h
new file mode 100644
index 0000000..a124278
--- /dev/null
+++ b/riscv/insns/vaesdf_vs.h
@@ -0,0 +1,43 @@
+// vaesdf.vs vd, vs2
+
+#include "zvkned_ext_macros.h"
+#include "zvk_ext_macros.h"
+
+require_vaes_vs_constraints;
+
+VI_ZVK_VD_VS2_NOOPERANDS_PRELOOP_EGU32x4_NOVM_LOOP(
+ {},
+ // This statement will be executed before the first execution
+ // of the loop, and only if the loop is going to be entered.
+ // We cannot use a block ( { ... } ) since we want the variables declared
+ // here to be visible in the loop block.
+ // We capture the "scalar", vs2's first element, by copy, even though
+ // the "no overlap" constraint means that vs2 should remain constant
+ // during the loop.
+ const EGU8x16_t scalar_key = P.VU.elt_group<EGU8x16_t>(vs2_num, 0);,
+ {
+ // For AES128, AES192, or AES256, state and key are 128b/16B values:
+ // - vd contains the input state,
+ // - vs2 contains the round key,
+ // - vd receives the output state.
+ //
+ // While the spec calls for handling the vector as made of EGU32x4
+ // element groups (i.e., 4 uint32_t), it is convenient to treat
+ // AES state and key as EGU8x16 (i.e., 16 uint8_t). This is why
+ // we extract the operands here instead of using the existing LOOP
+ // macro that defines/extracts the operand variables as EGU32x4.
+ EGU8x16_t aes_state = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg);
+
+ // InvShiftRows - Rotate the bytes of each row by 0, 1, 2, 3 positions.
+ VAES_INV_SHIFT_ROWS(aes_state);
+ // InvSubBytes - Apply S-box to every byte in the state
+ VAES_INV_SUB_BYTES(aes_state);
+ // AddRoundKey (which is also InvAddRoundKey as it's xor)
+ EGU8x16_XOREQ(aes_state, scalar_key);
+ // InvMixColumns is not performed in the final round.
+
+ // Update the destination register.
+ EGU8x16_t &vd = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg, true);
+ EGU8x16_COPY(vd, aes_state);
+ }
+);
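The only difference between the .vs and .vv forms is the round-key operand pattern: .vs broadcasts element group 0 of vs2 to every state group, while .vv consumes one key group per state group. A minimal sketch of the two patterns, showing only the AddRoundKey step; EltGroup and the function names are hypothetical:

#include <array>
#include <cstddef>
#include <cstdint>
#include <vector>

using EltGroup = std::array<uint8_t, 16>;  // one EGU8x16 element group

// .vs form: vs2's element group 0 is the single round key applied to
// every element group of vd (captured once, as in the PRELOOP above).
void add_round_key_vs(std::vector<EltGroup> &vd,
                      const std::vector<EltGroup> &vs2) {
  const EltGroup scalar_key = vs2[0];
  for (EltGroup &group : vd)
    for (size_t i = 0; i < 16; ++i)
      group[i] ^= scalar_key[i];
}

// .vv form: each element group of vd is paired with the matching
// element group of vs2.
void add_round_key_vv(std::vector<EltGroup> &vd,
                      const std::vector<EltGroup> &vs2) {
  for (size_t g = 0; g < vd.size(); ++g)
    for (size_t i = 0; i < 16; ++i)
      vd[g][i] ^= vs2[g][i];
}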
diff --git a/riscv/insns/vaesdf_vv.h b/riscv/insns/vaesdf_vv.h
new file mode 100644
index 0000000..9fca572
--- /dev/null
+++ b/riscv/insns/vaesdf_vv.h
@@ -0,0 +1,37 @@
+// vaesdf.vv vd, vs2
+
+#include "zvkned_ext_macros.h"
+#include "zvk_ext_macros.h"
+
+require_vaes_vv_constraints;
+
+VI_ZVK_VD_VS2_NOOPERANDS_PRELOOP_EGU32x4_NOVM_LOOP(
+ {},
+ {}, // No PRELOOP.
+ {
+ // For AES128, AES192, or AES256, state and key are 128b/16B values:
+ // - vd contains the input state,
+ // - vs2 contains the round key,
+ // - vd receives the output state.
+ //
+ // While the spec calls for handling the vector as made of EGU32x4
+ // element groups (i.e., 4 uint32_t), it is convenient to treat
+ // AES state and key as EGU8x16 (i.e., 16 uint8_t). This is why
+ // we extract the operands here instead of using the existing LOOP
+ // macro that defines/extracts the operand variables as EGU32x4.
+ EGU8x16_t aes_state = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg);
+ const EGU8x16_t round_key = P.VU.elt_group<EGU8x16_t>(vs2_num, idx_eg);
+
+ // InvShiftRows - Rotate the bytes of each row by 0, 1, 2, 3 positions.
+ VAES_INV_SHIFT_ROWS(aes_state);
+ // InvSubBytes - Apply S-box to every byte in the state
+ VAES_INV_SUB_BYTES(aes_state);
+ // AddRoundKey (which is also InvAddRoundKey as it's xor)
+ EGU8x16_XOREQ(aes_state, round_key);
+ // InvMixColumns is not performed in the final round.
+
+ // Update the destination register.
+ EGU8x16_t &vd = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg, true);
+ EGU8x16_COPY(vd, aes_state);
+ }
+);
diff --git a/riscv/insns/vaesdm_vs.h b/riscv/insns/vaesdm_vs.h
new file mode 100644
index 0000000..3c23e69
--- /dev/null
+++ b/riscv/insns/vaesdm_vs.h
@@ -0,0 +1,44 @@
+// vaesdm.vs vd, vs2
+
+#include "zvkned_ext_macros.h"
+#include "zvk_ext_macros.h"
+
+require_vaes_vs_constraints;
+
+VI_ZVK_VD_VS2_NOOPERANDS_PRELOOP_EGU32x4_NOVM_LOOP(
+ {},
+ // This statement will be executed before the first execution
+ // of the loop, and only if the loop is going to be entered.
+ // We cannot use a block ( { ... } ) since we want the variables declared
+ // here to be visible in the loop block.
+ // We capture the "scalar", vs2's first element, by copy, even though
+ // the "no overlap" constraint means that vs2 should remain constant
+ // during the loop.
+ const EGU8x16_t scalar_key = P.VU.elt_group<EGU8x16_t>(vs2_num, 0);,
+ {
+ // For AES128, AES192, or AES256, state and key are 128b/16B values:
+ // - vd contains the input state,
+ // - vs2 contains the round key,
+ // - vd receives the output state.
+ //
+ // While the spec calls for handling the vector as made of EGU32x4
+ // element groups (i.e., 4 uint32_t), it is convenient to treat
+ // AES state and key as EGU8x16 (i.e., 16 uint8_t). This is why
+ // we extract the operands here instead of using the existing LOOP
+ // macro that defines/extracts the operand variables as EGU32x4.
+ EGU8x16_t aes_state = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg);
+
+ // InvShiftRows - Rotate the bytes of each row by 0, 1, 2, 3 positions.
+ VAES_INV_SHIFT_ROWS(aes_state);
+ // InvSubBytes - Apply S-box to every byte in the state
+ VAES_INV_SUB_BYTES(aes_state);
+ // AddRoundKey (which is also InvAddRoundKey as it's xor)
+ EGU8x16_XOREQ(aes_state, scalar_key);
+ // InvMixColumns
+ VAES_INV_MIX_COLUMNS(aes_state);
+
+ // Update the destination register.
+ EGU8x16_t &vd = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg, true);
+ EGU8x16_COPY(vd, aes_state);
+ }
+);
diff --git a/riscv/insns/vaesdm_vv.h b/riscv/insns/vaesdm_vv.h
new file mode 100644
index 0000000..9c29cd9
--- /dev/null
+++ b/riscv/insns/vaesdm_vv.h
@@ -0,0 +1,38 @@
+// vaesdm.vv vd, vs2
+
+#include "zvkned_ext_macros.h"
+#include "zvk_ext_macros.h"
+
+require_vaes_vv_constraints;
+
+VI_ZVK_VD_VS2_NOOPERANDS_PRELOOP_EGU32x4_NOVM_LOOP(
+ {},
+ {}, // No PRELOOP.
+ {
+ // For AES128, AES192, or AES256, state and key are 128b/16B values:
+ // - vd contains the input state,
+ // - vs2 contains the round key,
+ // - vd receives the output state.
+ //
+ // While the spec calls for handling the vector as made of EGU32x4
+ // element groups (i.e., 4 uint32_t), it is convenient to treat
+ // AES state and key as EGU8x16 (i.e., 16 uint8_t). This is why
+ // we extract the operands here instead of using the existing LOOP
+ // macro that defines/extracts the operand variables as EGU32x4.
+ EGU8x16_t aes_state = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg);
+ const EGU8x16_t round_key = P.VU.elt_group<EGU8x16_t>(vs2_num, idx_eg);
+
+ // InvShiftRows - Rotate the bytes of each row by 0, 1, 2, 3 positions.
+ VAES_INV_SHIFT_ROWS(aes_state);
+ // InvSubBytes - Apply S-box to every byte in the state
+ VAES_INV_SUB_BYTES(aes_state);
+ // AddRoundKey (which is also InvAddRoundKey as it's xor)
+ EGU8x16_XOREQ(aes_state, round_key);
+ // InvMixColumns
+ VAES_INV_MIX_COLUMNS(aes_state);
+
+ // Update the destination register.
+ EGU8x16_t &vd = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg, true);
+ EGU8x16_COPY(vd, aes_state);
+ }
+);
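Note the middle-round ordering above (InvShiftRows, InvSubBytes, AddRoundKey, then InvMixColumns) follows the plain InvCipher of FIPS-197 Figure 12, so the regular round keys are used unmodified, without the InvMixColumns pre-transformation of the "equivalent inverse cipher". A sketch of the full decryption flow, with hypothetical helpers mirroring the VAES_INV_* macros:

#include <cstdint>

// Hypothetical inverse-step helpers (declarations only).
void inv_sub_bytes(uint8_t state[16]);
void inv_shift_rows(uint8_t state[16]);
void inv_mix_columns(uint8_t state[16]);
void add_round_key(uint8_t state[16], const uint8_t key[16]);

// AES-128 decryption of one block, as vaesz.vs, vaesdm.vv and
// vaesdf.vv compute it per element group (FIPS-197 InvCipher order).
void aes128_decrypt(uint8_t state[16], const uint8_t round_keys[11][16]) {
  add_round_key(state, round_keys[10]);  // vaesz.vs with the last round key
  for (int rnd = 9; rnd >= 1; --rnd) {   // vaesdm.vv, middle rounds
    inv_shift_rows(state);
    inv_sub_bytes(state);
    add_round_key(state, round_keys[rnd]);
    inv_mix_columns(state);              // after AddRoundKey, as above
  }
  inv_shift_rows(state);                 // vaesdf.vv, final round:
  inv_sub_bytes(state);                  // InvMixColumns is skipped
  add_round_key(state, round_keys[0]);
}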
diff --git a/riscv/insns/vaesef_vs.h b/riscv/insns/vaesef_vs.h
new file mode 100644
index 0000000..2d32653
--- /dev/null
+++ b/riscv/insns/vaesef_vs.h
@@ -0,0 +1,43 @@
+// vaesef.vs vd, vs2
+
+#include "zvkned_ext_macros.h"
+#include "zvk_ext_macros.h"
+
+require_vaes_vs_constraints;
+
+VI_ZVK_VD_VS2_NOOPERANDS_PRELOOP_EGU32x4_NOVM_LOOP(
+ {},
+ // This statement will be executed before the first execution
+ // of the loop, and only if the loop is going to be entered.
+ // We cannot use a block ( { ... } ) since we want the variables declared
+ // here to be visible in the loop block.
+ // We capture the "scalar", vs2's first element, by copy, even though
+ // the "no overlap" constraint means that vs2 should remain constant
+ // during the loop.
+ const EGU8x16_t scalar_key = P.VU.elt_group<EGU8x16_t>(vs2_num, 0);,
+ {
+ // For AES128, AES192, or AES256, state and key are 128b/16B values:
+ // - vd contains the input state,
+ // - vs2 contains the round key,
+ // - vd receives the output state.
+ //
+ // While the spec calls for handling the vector as made of EGU32x4
+ // element groups (i.e., 4 uint32_t), it is convenient to treat
+ // AES state and key as EGU8x16 (i.e., 16 uint8_t). This is why
+ // we extract the operands here instead of using the existing LOOP
+ // macro that defines/extracts the operand variables as EGU32x4.
+ EGU8x16_t aes_state = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg);
+
+ // SubBytes - Apply S-box to every byte in the state
+ VAES_SUB_BYTES(aes_state);
+ // ShiftRows - Rotate the bytes of each row by 0, 1, 2, 3 positions.
+ VAES_SHIFT_ROWS(aes_state);
+ // MixColumns is not performed for the final round.
+ // AddRoundKey
+ EGU8x16_XOREQ(aes_state, scalar_key);
+
+ // Update the destination register.
+ EGU8x16_t &vd = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg, true);
+ EGU8x16_COPY(vd, aes_state);
+ }
+);
diff --git a/riscv/insns/vaesef_vv.h b/riscv/insns/vaesef_vv.h
new file mode 100644
index 0000000..9b43a6d
--- /dev/null
+++ b/riscv/insns/vaesef_vv.h
@@ -0,0 +1,37 @@
+// vaesef.vv vd, vs2
+
+#include "zvkned_ext_macros.h"
+#include "zvk_ext_macros.h"
+
+require_vaes_vv_constraints;
+
+VI_ZVK_VD_VS2_NOOPERANDS_PRELOOP_EGU32x4_NOVM_LOOP(
+ {},
+ {}, // No PRELOOP.
+ {
+ // For AES128, AES192, or AES256, state and key are 128b/16B values:
+ // - vd contains the input state,
+ // - vs2 contains the round key,
+ // - vd receives the output state.
+ //
+ // While the spec calls for handling the vector as made of EGU32x4
+ // element groups (i.e., 4 uint32_t), it is convenient to treat
+ // AES state and key as EGU8x16 (i.e., 16 uint8_t). This is why
+ // we extract the operands here instead of using the existing LOOP
+ // macro that defines/extracts the operand variables as EGU32x4.
+ EGU8x16_t aes_state = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg);
+ const EGU8x16_t round_key = P.VU.elt_group<EGU8x16_t>(vs2_num, idx_eg);
+
+ // SubBytes - Apply S-box to every byte in the state
+ VAES_SUB_BYTES(aes_state);
+ // ShiftRows - Rotate the bytes of each row by 0, 1, 2, 3 positions.
+ VAES_SHIFT_ROWS(aes_state);
+ // MixColumns is not performed for the final round.
+ // AddRoundKey
+ EGU8x16_XOREQ(aes_state, round_key);
+
+ // Update the destination register.
+ EGU8x16_t &vd = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg, true);
+ EGU8x16_COPY(vd, aes_state);
+ }
+);
diff --git a/riscv/insns/vaesem_vs.h b/riscv/insns/vaesem_vs.h
new file mode 100644
index 0000000..348cd9f
--- /dev/null
+++ b/riscv/insns/vaesem_vs.h
@@ -0,0 +1,44 @@
+// vaesem.vs vd, vs2
+
+#include "zvkned_ext_macros.h"
+#include "zvk_ext_macros.h"
+
+require_vaes_vs_constraints;
+
+VI_ZVK_VD_VS2_NOOPERANDS_PRELOOP_EGU32x4_NOVM_LOOP(
+ {},
+ // This statement will be executed before the first execution
+ // of the loop, and only if the loop is going to be entered.
+ // We cannot use a block ( { ... } ) since we want the variables declared
+ // here to be visible in the loop block.
+ // We capture the "scalar", vs2's first element, by copy, even though
+ // the "no overlap" constraint means that vs2 should remain constant
+ // during the loop.
+ const EGU8x16_t scalar_key = P.VU.elt_group<EGU8x16_t>(vs2_num, 0);,
+ {
+ // For AES128, AES192, or AES256, state and key are 128b/16B values:
+ // - vd contains the input state,
+ // - vs2 contains the round key,
+ // - vd receives the output state.
+ //
+ // While the spec calls for handling the vector as made of EGU32x4
+ // element groups (i.e., 4 uint32_t), it is convenient to treat
+ // AES state and key as EGU8x16 (i.e., 16 uint8_t). This is why
+ // we extract the operands here instead of using the existing LOOP
+ // macro that defines/extracts the operand variables as EGU32x4.
+ EGU8x16_t aes_state = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg);
+
+ // SubBytes - Apply S-box to every byte in the state
+ VAES_SUB_BYTES(aes_state);
+ // ShiftRows - Rotate the bytes of each row by 0, 1, 2, 3 positions.
+ VAES_SHIFT_ROWS(aes_state);
+ // MixColumns
+ VAES_MIX_COLUMNS(aes_state);
+ // AddRoundKey
+ EGU8x16_XOREQ(aes_state, scalar_key);
+
+ // Update the destination register.
+ EGU8x16_t &vd = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg, true);
+ EGU8x16_COPY(vd, aes_state);
+ }
+);
diff --git a/riscv/insns/vaesem_vv.h b/riscv/insns/vaesem_vv.h
new file mode 100644
index 0000000..34f0056
--- /dev/null
+++ b/riscv/insns/vaesem_vv.h
@@ -0,0 +1,38 @@
+// vaesem.vv vd, vs2
+
+#include "zvkned_ext_macros.h"
+#include "zvk_ext_macros.h"
+
+require_vaes_vv_constraints;
+
+VI_ZVK_VD_VS2_NOOPERANDS_PRELOOP_EGU32x4_NOVM_LOOP(
+ {},
+ {}, // No PRELOOP.
+ {
+ // For AES128, AES192, or AES256, state and key are 128b/16B values:
+ // - vd contains the input state,
+ // - vs2 contains the round key,
+ // - vd receives the output state.
+ //
+ // While the spec calls for handling the vector as made of EGU32x4
+ // element groups (i.e., 4 uint32_t), it is convenient to treat
+ // AES state and key as EGU8x16 (i.e., 16 uint8_t). This is why
+ // we extract the operands here instead of using the existing LOOP
+ // macro that defines/extracts the operand variables as EGU32x4.
+ EGU8x16_t aes_state = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg);
+ const EGU8x16_t round_key = P.VU.elt_group<EGU8x16_t>(vs2_num, idx_eg);
+
+ // SubBytes - Apply S-box to every byte in the state
+ VAES_SUB_BYTES(aes_state);
+ // ShiftRows - Rotate the bytes of each row by 0, 1, 2, 3 positions.
+ VAES_SHIFT_ROWS(aes_state);
+ // MixColumns
+ VAES_MIX_COLUMNS(aes_state);
+ // AddRoundKey
+ EGU8x16_XOREQ(aes_state, round_key);
+
+ // Update the destination register.
+ EGU8x16_t &vd = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg, true);
+ EGU8x16_COPY(vd, aes_state);
+ }
+);
diff --git a/riscv/insns/vaeskf1_vi.h b/riscv/insns/vaeskf1_vi.h
new file mode 100644
index 0000000..28d03d0
--- /dev/null
+++ b/riscv/insns/vaeskf1_vi.h
@@ -0,0 +1,65 @@
+// vaeskf1.vi vd, vs2, rnd
+
+#include "zvk_ext_macros.h"
+#include "zvkned_ext_macros.h"
+
+require_vaeskf_vi_constraints;
+
+// There is one round constant for each round number
+// between 1 and 10. We index using 'round# - 1'.
+static constexpr uint8_t kRoundConstants[10] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
+};
+
+// For AES128, AES192, or AES256, keys (and state) are handled as
+// 128b/16B values.
+//
+// The Zvkned spec calls for handling the vector as made of EGU32x4
+// element groups (i.e., 4 uint32_t), and FIPS-197 AES specification
+// describes the key expansion in terms of manipulations of 32 bit
+// words, so using the EGU32x4 is natural.
+//
+VI_ZVK_VD_VS2_ZIMM5_EGU32x4_NOVM_LOOP(
+ {},
+ // The following statements will be executed before the first execution
+ // of the loop, and only if the loop is going to be entered.
+ // We cannot use a block ( { ... } ) since we want the 'round' variable
+// declared and defined here to be visible in the loop block.
+ // Only consider the bottom 4 bits of the immediate.
+ const reg_t zimm4 = zimm5 & 0xF;
+// Normalize the round value to be in [1, 10] by toggling bit 3
+ // if outside the range (i.e., +8 or -8).
+ const reg_t round = ((1 <= zimm4) && (zimm4 <= 10)) ? zimm4 : (zimm4 ^ 0x8);
+ const uint32_t rcon = kRoundConstants[round - 1];,
+ // Per Element Group body.
+ {
+ // vaeskf1_vi produces key[i+1] in vd; it receives key[i] in vs2,
+ // i.e., 4x32b values (4 words).
+ //
+ // The logic is fairly similar between vaeskf1/vaeskf2, with the following
+ // differences:
+ // - in AES-128 (vaeskf1), we get both the 'temp' word and
+ // the "previous words" w0..w3 from key[i]/vs2.
+ // - in AES-256 (vaeskf2), we get 'temp' from key[i]/vs2, and
+ // the "previous words" w0..w3 from key[i-1]/vd.
+
+ // 'temp' is extracted from the last (most significant) word of key[i].
+ uint32_t temp = vs2[3];
+ temp = (temp >> 8) | (temp << 24); // Rotate right by 8
+ temp = (((uint32_t)AES_ENC_SBOX[(temp >> 24) & 0xFF] << 24) |
+ ((uint32_t)AES_ENC_SBOX[(temp >> 16) & 0xFF] << 16) |
+ ((uint32_t)AES_ENC_SBOX[(temp >> 8) & 0xFF] << 8) |
+ ((uint32_t)AES_ENC_SBOX[(temp >> 0) & 0xFF] << 0));
+ temp = temp ^ rcon;
+
+ // "old" words are the w[i-Nk] of FIPS-197. They are extracted
+ // from vs2, which contains key[i] in AES-128 where Nk=4.
+ const uint32_t w0 = vs2[0] ^ temp;
+ const uint32_t w1 = vs2[1] ^ w0;
+ const uint32_t w2 = vs2[2] ^ w1;
+ const uint32_t w3 = vs2[3] ^ w2;
+
+ // Overwrite vd with k[i+1] from the new words.
+ SET_EGU32x4_LE(vd, w0, w1, w2, w3);
+ }
+);
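The loop body is exactly the FIPS-197 key expansion specialized to Nk = 4, one round per element group. A scalar restatement as a sketch, assuming AES_ENC_SBOX is the forward S-box table used in the loop body above:

#include <cstdint>

// AES_ENC_SBOX is assumed to be the forward S-box table provided by
// riscv/insns/aes_common.h (declaration only).
extern const uint8_t AES_ENC_SBOX[256];

// SubWord: apply the S-box to each byte of a (little-endian) word.
static uint32_t sub_word(uint32_t w) {
  return ((uint32_t)AES_ENC_SBOX[(w >> 24) & 0xFF] << 24) |
         ((uint32_t)AES_ENC_SBOX[(w >> 16) & 0xFF] << 16) |
         ((uint32_t)AES_ENC_SBOX[(w >> 8) & 0xFF] << 8) |
         ((uint32_t)AES_ENC_SBOX[(w >> 0) & 0xFF] << 0);
}

// One vaeskf1 round in scalar form: derive round key i+1 from round
// key i; rcon is kRoundConstants[round - 1], as above.
void aes128_expand_round(const uint32_t key_in[4], uint32_t key_out[4],
                         uint8_t rcon) {
  uint32_t temp = key_in[3];
  temp = (temp >> 8) | (temp << 24);   // RotWord, bytes packed little-endian
  temp = sub_word(temp) ^ rcon;
  key_out[0] = key_in[0] ^ temp;       // w[i] = w[i-Nk] ^ temp
  key_out[1] = key_in[1] ^ key_out[0];
  key_out[2] = key_in[2] ^ key_out[1];
  key_out[3] = key_in[3] ^ key_out[2];
}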
diff --git a/riscv/insns/vaeskf2_vi.h b/riscv/insns/vaeskf2_vi.h
new file mode 100644
index 0000000..49c2a2d
--- /dev/null
+++ b/riscv/insns/vaeskf2_vi.h
@@ -0,0 +1,89 @@
+// vaeskf2.vi vd, vs2, rnd
+
+#include "zvk_ext_macros.h"
+#include "zvkned_ext_macros.h"
+
+require_vaeskf_vi_constraints;
+
+// Round Constants
+//
+// Only the even rounds need to be encoded; the odd ones can use 0
+// or skip the rcon handling. We can use '(round# / 2) - 1'
+// (or "(round# >> 1) - 1") to index into the array.
+//
+// Round# Constant
+// [ 2] -> kRoundConstants[0]
+// [ 3] -> 0 / Nothing
+// [ 4] -> kRoundConstants[1]
+// [ 5] -> 0 / Nothing
+// [ 6] -> kRoundConstants[2]
+// [ 7] -> 0 / Nothing
+// ...
+// [13] -> 0 / Nothing
+// [14] -> kRoundConstants[6]
+static constexpr uint8_t kRoundConstants[7] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40,
+};
+
+// For AES128, AES192, or AES256, keys (and state) are handled as
+// 128b/16B values.
+//
+// The Zvkned spec calls for handling the vector as made of EGU32x4
+// element groups (i.e., 4 uint32_t), and FIPS-197 AES specification
+// describes the key expansion in terms of manipulations of 32 bit
+// words, so using the EGU32x4 is natural.
+//
+VI_ZVK_VD_VS2_ZIMM5_EGU32x4_NOVM_LOOP(
+ {},
+ // The following statements will be executed before the first execution
+ // of the loop, and only if the loop is going to be entered.
+ // We cannot use a block ( { ... } ) since we want the 'round' variable
+// declared and defined here to be visible in the loop block.
+ // Only consider the bottom 4 bits of the immediate.
+ const reg_t zimm4 = zimm5 & 0xF;
+ // Normalize the round value to be in [2, 14] by toggling bit 3
+ // if outside the range (i.e., +8 or -8).
+ const reg_t round = ((2 <= zimm4) && (zimm4 <= 14)) ? zimm4 : (zimm4 ^ 0x8);,
+ // Per Element Group body.
+ {
+ // vaeskf2_vi produces key[i+1] in vd; it receives key[i] in vs2,
+ // i.e., 4x32b values (4 words).
+ //
+ // The logic is fairly similar between vaeskf1/vaeskf2, with the following
+ // differences:
+ // - in AES-128 (vaeskf1), we get both the 'temp' word and
+ // the "previous words" w0..w3 from key[i]/vs2.
+ // - in AES-256 (vaeskf2), we get 'temp' from key[i]/vs2, and
+ // the "previous words" w0..w3 from key[i-1]/vd.
+
+ // 'temp' is extracted from the last (most significant) word of key[i].
+ uint32_t temp = vs2[3];
+ // With AES-256, when we have an odd round number, we hit the
+ // Nk > 6 and i mod Nk = 4
+ // condition in the FIPS-197 key expansion pseudo-code (Figure 11).
+ // In those cases we skip RotWord and the round constant is 0.
+ const bool is_even_round = (round & 0x1) == 0;
+ if (is_even_round) {
+ temp = (temp >> 8) | (temp << 24); // Rotate right by 8
+ }
+ temp = (((uint32_t)AES_ENC_SBOX[(temp >> 24) & 0xFF] << 24) |
+ ((uint32_t)AES_ENC_SBOX[(temp >> 16) & 0xFF] << 16) |
+ ((uint32_t)AES_ENC_SBOX[(temp >> 8) & 0xFF] << 8) |
+ ((uint32_t)AES_ENC_SBOX[(temp >> 0) & 0xFF] << 0));
+
+ if (is_even_round) {
+ const uint32_t rcon = kRoundConstants[(round >> 1) - 1];
+ temp = temp ^ rcon;
+ }
+
+ // "old" words are the w[i-Nk] of FIPS-197. For AES-256, where Nk=8,
+ // they are extracted from vd which contains key[i-1].
+ const uint32_t w0 = vd[0] ^ temp;
+ const uint32_t w1 = vd[1] ^ w0;
+ const uint32_t w2 = vd[2] ^ w1;
+ const uint32_t w3 = vd[3] ^ w2;
+
+ // Overwrite vd with k[i+1] from the new words.
+ SET_EGU32x4_LE(vd, w0, w1, w2, w3);
+ }
+);
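The AES-256 schedule interleaves two cases, and the even/odd split above selects between them. A scalar restatement of just the 'temp' derivation, as a sketch reusing the sub_word helper sketched after vaeskf1 above:

// 'temp' for one vaeskf2 round: even rounds take the full
// RotWord + SubWord + rcon path (the i mod Nk == 0 case of FIPS-197);
// odd rounds take SubWord only (the Nk > 6, i mod Nk == 4 case).
uint32_t aes256_key_temp(uint32_t prev_last_word, unsigned round,
                         const uint8_t rcon_table[7]) {
  uint32_t temp = prev_last_word;
  const bool is_even_round = (round & 0x1) == 0;
  if (is_even_round)
    temp = (temp >> 8) | (temp << 24);  // RotWord
  temp = sub_word(temp);
  if (is_even_round)
    temp ^= rcon_table[(round >> 1) - 1];
  return temp;
}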
diff --git a/riscv/insns/vaesz_vs.h b/riscv/insns/vaesz_vs.h
new file mode 100644
index 0000000..c3dc931
--- /dev/null
+++ b/riscv/insns/vaesz_vs.h
@@ -0,0 +1,24 @@
+// vaesz.vs vd, vs2
+
+#include "zvk_ext_macros.h"
+#include "zvkned_ext_macros.h"
+
+require_vaes_vs_constraints;
+
+VI_ZVK_VD_VS2_NOOPERANDS_PRELOOP_EGU32x4_NOVM_LOOP(
+ {},
+ // This statement will be executed before the first execution
+ // of the loop, and only if the loop is going to be entered.
+ // We cannot use a block ( { ... } ) since we want the variables declared
+ // here to be visible in the loop block.
+ // We capture the "scalar", vs2's first element, by copy, even though
+ // the "no overlap" constraint means that vs2 should remain constant
+ // during the loop.
+ const EGU8x16_t scalar_key = P.VU.elt_group<EGU8x16_t>(vs2_num, 0);,
+ // Per Element Group body.
+ {
+ EGU8x16_t &vd = P.VU.elt_group<EGU8x16_t>(vd_num, idx_eg, true);
+ // Produce vd = vd ^ "common key from vs2".
+ EGU8x16_XOR(vd, vd, scalar_key);
+ }
+);
diff --git a/riscv/riscv.mk.in b/riscv/riscv.mk.in
index 4ce088f..2d75662 100644
--- a/riscv/riscv.mk.in
+++ b/riscv/riscv.mk.in
@@ -1368,6 +1368,19 @@ riscv_insn_ext_zvkg= \
vghsh_vv \
vgmul_vv \
+riscv_insn_ext_zvkned = \
+ vaesdf_vs \
+ vaesdf_vv \
+ vaesdm_vs \
+ vaesdm_vv \
+ vaesef_vs \
+ vaesef_vv \
+ vaesem_vs \
+ vaesem_vv \
+ vaeskf1_vi \
+ vaeskf2_vi \
+ vaesz_vs \
+
# Covers both Zvknha and Zvknhb.
riscv_insn_ext_zvknh = \
vsha2cl_vv \
@@ -1378,6 +1391,7 @@ riscv_insn_ext_zvk = \
$(riscv_insn_ext_zvbb) \
$(riscv_insn_ext_zvbc) \
$(riscv_insn_ext_zvkg) \
+ $(riscv_insn_ext_zvkned) \
$(riscv_insn_ext_zvknh) \
riscv_insn_list = \
diff --git a/riscv/zvkned_ext_macros.h b/riscv/zvkned_ext_macros.h
new file mode 100644
index 0000000..db705c7
--- /dev/null
+++ b/riscv/zvkned_ext_macros.h
@@ -0,0 +1,270 @@
+// Helper macros to help implement instructions defined as part of
+// the RISC-V Zvkned extension (vector AES single round).
+
+#include "insns/aes_common.h"
+
+#ifndef RISCV_ZVKNED_EXT_MACROS_H_
+#define RISCV_ZVKNED_EXT_MACROS_H_
+
+// vaes*.vs instruction constraints:
+// - Zvkned is enabled
+// - EGW (128) <= LMUL * VLEN
+// - vd and vs2 cannot overlap
+//
+// The constraint that vstart and vl are both EGS (4) aligned
+// is checked in the VI_ZVK_..._EGU32x4_..._LOOP macros.
+#define require_vaes_vs_constraints \
+ do { \
+ require_zvkned; \
+ require(P.VU.vsew == 32); \
+ require_egw_fits(128); \
+ require(insn.rd() != insn.rs2()); \
+ } while (false)
+
+// vaes*.vv instruction constraints. Those are the same as the .vs ones,
+// except for the overlap constraint that is not present for .vv variants.
+// - Zvkned is enabled
+// - EGW (128) <= LMUL * VLEN
+//
+// The constraint that vstart and vl are both EGS (4) aligned
+// is checked in the VI_ZVK_..._EGU32x4_..._LOOP macros.
+#define require_vaes_vv_constraints \
+ do { \
+ require_zvkned; \
+ require(P.VU.vsew == 32); \
+ require_egw_fits(128); \
+ } while (false)
+
+// vaeskf*.vi instruction constraints. Those are the same as the .vv ones.
+#define require_vaeskf_vi_constraints \
+ do { \
+ require_zvkned; \
+ require(P.VU.vsew == 32); \
+ require_egw_fits(128); \
+ } while (false)
+
+#define VAES_XTIME(A) (((A) << 1) ^ (((A) & 0x80) ? 0x1b : 0))
+
+#define VAES_GFMUL(A, B) \
+ ((((B) & 0x1) ? (A) : 0) ^ \
+ (((B) & 0x2) ? VAES_XTIME(A) : 0) ^ \
+ (((B) & 0x4) ? VAES_XTIME(VAES_XTIME(A)) : 0) ^ \
+ (((B) & 0x8) ? VAES_XTIME(VAES_XTIME(VAES_XTIME(A))) : 0))
+
+// Apply the S-box transform to every byte in the VAESState 'state'
+#define VAES_SUB_BYTES(STATE) \
+ do { \
+ static constexpr uint8_t kVAESXEncSBox[256] = { \
+ 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, \
+ 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, \
+ 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, \
+ 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, \
+ 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, \
+ 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, \
+ 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, \
+ 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, \
+ 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, \
+ 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, \
+ 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, \
+ 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, \
+ 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, \
+ 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, \
+ 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, \
+ 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, \
+ 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, \
+ 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, \
+ 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, \
+ 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, \
+ 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, \
+ 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, \
+ 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, \
+ 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, \
+ 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, \
+ 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, \
+ 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, \
+ 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, \
+ 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, \
+ 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, \
+ 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, \
+ 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16, \
+ }; \
+ for (uint8_t& byte : (STATE)) { \
+ byte = kVAESXEncSBox[byte]; \
+ } \
+ } while (0)
+
+// Applies the S-box inverse (decode) transform to every byte
+// in the VAESState 'state'.
+#define VAES_INV_SUB_BYTES(STATE) \
+ do { \
+ static constexpr uint8_t kVAESXDecSBox[256] = { \
+ 0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, \
+ 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB, \
+ 0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, \
+ 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB, \
+ 0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, \
+ 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E, \
+ 0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, \
+ 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25, \
+ 0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, \
+ 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92, \
+ 0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, \
+ 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84, \
+ 0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, \
+ 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06, \
+ 0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, \
+ 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B, \
+ 0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, \
+ 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73, \
+ 0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, \
+ 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E, \
+ 0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, \
+ 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B, \
+ 0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, \
+ 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4, \
+ 0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, \
+ 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F, \
+ 0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, \
+ 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF, \
+ 0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, \
+ 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61, \
+ 0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, \
+ 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D, \
+ }; \
+ for (uint8_t &byte : (STATE)) { \
+ byte = kVAESXDecSBox[byte]; \
+ } \
+ } while (0)
+
+// Shift the state rows, as specified in ShiftRows.
+// 'STATE' is a VAESState value.
+#define VAES_SHIFT_ROWS(STATE) \
+ do { \
+ uint8_t temp; \
+ /* Row 0 (byte indices 0, 4, 8, 12) does not rotate. */ \
+ /* Row 1 (byte indices 1, 5, 9, 13) rotates left by 1 position. */ \
+ temp = (STATE)[1]; \
+ (STATE)[ 1] = (STATE)[ 5]; \
+ (STATE)[ 5] = (STATE)[ 9]; \
+ (STATE)[ 9] = (STATE)[13]; \
+ (STATE)[13] = temp; \
+ /* Row 2 (byte indices 2, 6, 10, 14) rotates by 2 positions. */ \
+ temp = (STATE)[2]; \
+ (STATE)[ 2] = (STATE)[10]; \
+ (STATE)[10] = temp; \
+ temp = (STATE)[6]; \
+ (STATE)[ 6] = (STATE)[14]; \
+ (STATE)[14] = temp; \
+ /* Row 3 (byte indices 3, 7, 11, 15) rotates left by 3 positions (or right by 1). */ \
+ temp = (STATE)[3]; \
+ (STATE)[ 3] = (STATE)[15]; \
+ (STATE)[15] = (STATE)[11]; \
+ (STATE)[11] = (STATE)[ 7]; \
+ (STATE)[ 7] = temp; \
+ } while (0)
+
+// Shifts the state rows, as specified in InvShiftRows.
+// 'STATE' is a VAESState value.
+#define VAES_INV_SHIFT_ROWS(STATE) \
+ do { \
+ uint8_t temp; \
+ /* Row 0 (byte indices 0, 4, 8, 12) does not rotate. */ \
+ /* Row 1 (byte indices 1, 5, 9, 13) rotates right by 1 position. */ \
+ temp = (STATE)[1]; \
+ (STATE)[ 1] = (STATE)[13]; \
+ (STATE)[13] = (STATE)[ 9]; \
+ (STATE)[ 9] = (STATE)[ 5]; \
+ (STATE)[ 5] = temp; \
+ /* Row 2 (byte indices 2, 6, 10, 14) rotates by 2 positions. */ \
+ temp = (STATE)[2]; \
+ (STATE)[ 2] = (STATE)[10]; \
+ (STATE)[10] = temp; \
+ temp = (STATE)[6]; \
+ (STATE)[ 6] = (STATE)[14]; \
+ (STATE)[14] = temp; \
+ /* Row 3 (byte indices 3, 7, 11, 15) rotates right by 3 positions (or left by 1). */ \
+ temp = (STATE)[3]; \
+ (STATE)[ 3] = (STATE)[ 7]; \
+ (STATE)[ 7] = (STATE)[11]; \
+ (STATE)[11] = (STATE)[15]; \
+ (STATE)[15] = temp; \
+ } while (0)
+
+// Implements the function producing one byte, one-fourth of the column
+// transformation MixColumns() specified in FIPS-197 5.1.3.
+//
+// The arguments are all bytes (i.e., uint8_t). The function implemented
+// is
+// F(A, B, C, D) = (2 . A) xor (3 . B) xor C xor D
+// where '.' denotes the Galois Field multiplication over 2**8.
+//
+#define VAES_MIX_COLUMN_BYTE(A, B, C, D) \
+ (VAES_GFMUL((A), 0x2) ^ VAES_GFMUL((B), 0x3) ^ (C) ^ (D))
+
+// Implements the function producing one byte, one-fourth of the column
+// transformation InvMixColumns() specified in FIPS-197 5.3.3.
+//
+// The arguments are all bytes (i.e., uint8_t). The function implemented
+// is
+// F(A, B, C, D) = (0xE . A) xor (0xB . B) xor (0xD . C) xor (0x9 . D)
+// where '.' denotes the Galois Field multiplication over 2**8.
+//
+#define VAES_INV_MIX_COLUMN_BYTE(A, B, C, D) \
+ (VAES_GFMUL((A), 0xE) ^ \
+ VAES_GFMUL((B), 0xB) ^ \
+ VAES_GFMUL((C), 0xD) ^ \
+ VAES_GFMUL((D), 0x9))
+
+// Given the state and a column index, applies the MixColumns
+// transformation to that 4-byte column in place.
+#define VAES_MIX_COLUMN(STATE, COL_IDX) \
+ do { \
+ uint8_t *column = &(STATE)[(COL_IDX) * 4]; \
+ /* Extract the bytes, before we start overwriting them */ \
+ const uint8_t b0 = column[0]; \
+ const uint8_t b1 = column[1]; \
+ const uint8_t b2 = column[2]; \
+ const uint8_t b3 = column[3]; \
+ /* Each successive output byte rotates the input bytes by 1 */ \
+ column[0] = VAES_MIX_COLUMN_BYTE(b0, b1, b2, b3); \
+ column[1] = VAES_MIX_COLUMN_BYTE(b1, b2, b3, b0); \
+ column[2] = VAES_MIX_COLUMN_BYTE(b2, b3, b0, b1); \
+ column[3] = VAES_MIX_COLUMN_BYTE(b3, b0, b1, b2); \
+ } while (0)
+
+// Given the state and a column index, applies the InvMixColumns
+// transformation to that 4-byte column in place.
+#define VAES_INV_MIX_COLUMN(STATE, COL_IDX) \
+ do { \
+ uint8_t *column = &(STATE)[(COL_IDX) * 4]; \
+ /* Extract the bytes, before we start overwriting them */ \
+ const uint8_t b0 = column[0]; \
+ const uint8_t b1 = column[1]; \
+ const uint8_t b2 = column[2]; \
+ const uint8_t b3 = column[3]; \
+ /* Each successive output byte rotates the input bytes by 1 */ \
+ column[0] = VAES_INV_MIX_COLUMN_BYTE(b0, b1, b2, b3); \
+ column[1] = VAES_INV_MIX_COLUMN_BYTE(b1, b2, b3, b0); \
+ column[2] = VAES_INV_MIX_COLUMN_BYTE(b2, b3, b0, b1); \
+ column[3] = VAES_INV_MIX_COLUMN_BYTE(b3, b0, b1, b2); \
+ } while (0)
+
+// Implements MixColumns as defined in FIPS-197 5.1.3.
+#define VAES_MIX_COLUMNS(STATE) \
+ do { \
+ VAES_MIX_COLUMN((STATE), 0); \
+ VAES_MIX_COLUMN((STATE), 1); \
+ VAES_MIX_COLUMN((STATE), 2); \
+ VAES_MIX_COLUMN((STATE), 3); \
+ } while (0)
+
+// Implements InvMixColumns as defined in FIPS-197 5.3.3.
+#define VAES_INV_MIX_COLUMNS(STATE) \
+ do { \
+ VAES_INV_MIX_COLUMN((STATE), 0); \
+ VAES_INV_MIX_COLUMN((STATE), 1); \
+ VAES_INV_MIX_COLUMN((STATE), 2); \
+ VAES_INV_MIX_COLUMN((STATE), 3); \
+ } while (0)
+
+#endif // RISCV_ZVKNED_EXT_MACROS_H_
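The arithmetic in this header is easy to sanity-check against FIPS-197. A minimal smoke-test sketch, assuming the VAES_* macro block above is pasted into (or #included by) a standalone translation unit; note that VAES_XTIME/VAES_GFMUL rely on assignment back into a uint8_t for truncation, so standalone uses must cast:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // GF(2^8) products from the worked example in FIPS-197 section 4.2:
  // {57}.{02} = {ae}, {57}.{04} = {47}, {57}.{08} = {8e}.
  // The casts matter: intermediate XTIME results can carry above bit 7.
  assert((uint8_t)VAES_GFMUL(0x57, 0x2) == 0xae);
  assert((uint8_t)VAES_GFMUL(0x57, 0x4) == 0x47);
  assert((uint8_t)VAES_GFMUL(0x57, 0x8) == 0x8e);

  // ShiftRows followed by InvShiftRows must be the identity.
  uint8_t state[16], saved[16];
  for (int i = 0; i < 16; ++i) state[i] = (uint8_t)i;
  std::memcpy(saved, state, sizeof(state));
  VAES_SHIFT_ROWS(state);
  VAES_INV_SHIFT_ROWS(state);
  assert(std::memcmp(state, saved, sizeof(state)) == 0);

  // MixColumns on the well-known test column [db 13 53 45] -> [8e 4d a1 bc],
  // then InvMixColumns to round-trip back (all-zero columns stay zero).
  uint8_t mix[16] = {0xdb, 0x13, 0x53, 0x45};
  uint8_t mix_saved[16];
  std::memcpy(mix_saved, mix, sizeof(mix));
  VAES_MIX_COLUMNS(mix);
  assert(mix[0] == 0x8e && mix[1] == 0x4d && mix[2] == 0xa1 && mix[3] == 0xbc);
  VAES_INV_MIX_COLUMNS(mix);
  assert(std::memcmp(mix, mix_saved, sizeof(mix)) == 0);
  return 0;
}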