author     Richard Henderson <richard.henderson@linaro.org>    2023-06-01 21:57:10 -0700
committer  Richard Henderson <richard.henderson@linaro.org>    2023-07-08 07:30:17 +0100
commit     6b0a96ce3a405ef4676e1fa853f2c649dc25c2b4
tree       d9883fd6861a69f1c87d30459983f14cb0242363 /crypto
parent     e20e14d2b15d5ad4fb0a640c95d7c1bc534d9fd7
crypto: Add aesenc_SB_SR_AK
Start adding infrastructure for accelerating guest AES.
Begin with a SubBytes + ShiftRows + AddRoundKey primitive.
Acked-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
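
For reference, the step this patch adds computes, for each output byte i, Sbox(state[ShiftRows(i)]) XOR roundkey[i]. Below is a minimal standalone C sketch of that computation, not taken from the patch: the sbox parameter stands in for QEMU's AES_sbox table, and the (5 * i) & 15 source index is an assumption about what AES_SH(i) expands to for the usual column-major state layout.

/*
 * Illustrative sketch only -- not the QEMU implementation.
 * Byte i of the state holds row (i % 4) of column (i / 4).
 */
#include <stdint.h>

static void aes_sub_shift_addkey(uint8_t out[16], const uint8_t in[16],
                                 const uint8_t rk[16],
                                 const uint8_t sbox[256])
{
    for (int i = 0; i < 16; i++) {
        /*
         * ShiftRows: row r is rotated left by r columns, so output byte i
         * takes its input from index (5 * i) % 16 (assumed to be what
         * AES_SH(i) encodes in the patch below).
         */
        uint8_t shifted = in[(5 * i) & 15];

        /* SubBytes through the S-box, then AddRoundKey. */
        out[i] = sbox[shifted] ^ rk[i];
    }
}

Because SubBytes operates on each byte independently, applying it before or after the ShiftRows permutation gives the same result, which is why the patch can fold both into one table lookup per byte.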
Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/aes.c | 46 |
1 file changed, 46 insertions, 0 deletions
diff --git a/crypto/aes.c b/crypto/aes.c
index e65c97e..408d92b 100644
--- a/crypto/aes.c
+++ b/crypto/aes.c
@@ -29,6 +29,7 @@
  */
 #include "qemu/osdep.h"
 #include "crypto/aes.h"
+#include "crypto/aes-round.h"
 
 typedef uint32_t u32;
 typedef uint8_t u8;
@@ -1215,6 +1216,51 @@ static const u32 rcon[] = {
         0x1B000000, 0x36000000, /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
 };
 
+/*
+ * Perform SubBytes + ShiftRows + AddRoundKey.
+ */
+static inline void
+aesenc_SB_SR_AK_swap(AESState *ret, const AESState *st,
+                     const AESState *rk, bool swap)
+{
+    const int swap_b = swap ? 15 : 0;
+    AESState t;
+
+    t.b[swap_b ^ 0x0] = AES_sbox[st->b[swap_b ^ AES_SH(0x0)]];
+    t.b[swap_b ^ 0x1] = AES_sbox[st->b[swap_b ^ AES_SH(0x1)]];
+    t.b[swap_b ^ 0x2] = AES_sbox[st->b[swap_b ^ AES_SH(0x2)]];
+    t.b[swap_b ^ 0x3] = AES_sbox[st->b[swap_b ^ AES_SH(0x3)]];
+    t.b[swap_b ^ 0x4] = AES_sbox[st->b[swap_b ^ AES_SH(0x4)]];
+    t.b[swap_b ^ 0x5] = AES_sbox[st->b[swap_b ^ AES_SH(0x5)]];
+    t.b[swap_b ^ 0x6] = AES_sbox[st->b[swap_b ^ AES_SH(0x6)]];
+    t.b[swap_b ^ 0x7] = AES_sbox[st->b[swap_b ^ AES_SH(0x7)]];
+    t.b[swap_b ^ 0x8] = AES_sbox[st->b[swap_b ^ AES_SH(0x8)]];
+    t.b[swap_b ^ 0x9] = AES_sbox[st->b[swap_b ^ AES_SH(0x9)]];
+    t.b[swap_b ^ 0xa] = AES_sbox[st->b[swap_b ^ AES_SH(0xA)]];
+    t.b[swap_b ^ 0xb] = AES_sbox[st->b[swap_b ^ AES_SH(0xB)]];
+    t.b[swap_b ^ 0xc] = AES_sbox[st->b[swap_b ^ AES_SH(0xC)]];
+    t.b[swap_b ^ 0xd] = AES_sbox[st->b[swap_b ^ AES_SH(0xD)]];
+    t.b[swap_b ^ 0xe] = AES_sbox[st->b[swap_b ^ AES_SH(0xE)]];
+    t.b[swap_b ^ 0xf] = AES_sbox[st->b[swap_b ^ AES_SH(0xF)]];
+
+    /*
+     * Perform the AddRoundKey with generic vectors.
+     * This may be expanded to either host integer or host vector code.
+     * The key and output endianness match, so no bswap required.
+     */
+    ret->v = t.v ^ rk->v;
+}
+
+void aesenc_SB_SR_AK_gen(AESState *r, const AESState *s, const AESState *k)
+{
+    aesenc_SB_SR_AK_swap(r, s, k, false);
+}
+
+void aesenc_SB_SR_AK_genrev(AESState *r, const AESState *s, const AESState *k)
+{
+    aesenc_SB_SR_AK_swap(r, s, k, true);
+}
+
 /**
  * Expand the cipher key into the encryption key schedule.
  */
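
As a usage sketch (not part of the patch): a caller with the state and round key in AESState blocks picks the _gen or _genrev entry point depending on whether byte 0 of the AES state sits at b[0] or at b[15]; the swap path simply XORs every byte index with 15. The wrapper below is hypothetical and only uses the two functions added above.

#include "qemu/osdep.h"
#include "crypto/aes-round.h"

/* Hypothetical helper: one SubBytes + ShiftRows + AddRoundKey step. */
static void do_round_step(AESState *out, const AESState *in,
                          const AESState *rk, bool reversed_layout)
{
    if (reversed_layout) {
        /* AES byte 0 stored at b[15]: use the index-reversing variant. */
        aesenc_SB_SR_AK_genrev(out, in, rk);
    } else {
        /* AES byte 0 stored at b[0]. */
        aesenc_SB_SR_AK_gen(out, in, rk);
    }
}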