| author | Richard Henderson <richard.henderson@linaro.org> | 2023-06-01 23:58:52 -0700 |
|---|---|---|
| committer | Richard Henderson <richard.henderson@linaro.org> | 2023-07-08 07:30:17 +0100 |
| commit | d6a2443696c5b34fb20879e899bf823d6168b068 | |
| tree | cbb20eedf46dec9d97f710e1c796bf0982ed0a7d | |
| parent | 28e91474ce558bca55c800e7977bab7c66a44abb | |
host/include/i386: Implement aes-round.h
Detect AES in cpuinfo; implement the accel hooks.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
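
The header introduced below keys everything off one pair of macros: when the compiler already targets AES+SSSE3 (`__AES__` and `__SSSE3__` defined), `HAVE_AES_ACCEL` folds to the constant `true` and no function attribute is needed; otherwise each helper is compiled with `__attribute__((target("aes,ssse3")))` and callers must test the `CPUINFO_AES` bit at run time. A minimal sketch of how a caller might dispatch on it, using the `AESState` type from QEMU's generic aes-round.h; the fallback name `aesenc_SB_SR_AK_gen` is an assumption for illustration, not part of this patch:

```c
/*
 * Hypothetical dispatch wrapper (sketch).  HAVE_AES_ACCEL is either the
 * compile-time constant true (the other branch is dead code) or a
 * likely()-hinted test of the CPUINFO_AES bit that the cpuinfo
 * constructor fills in at startup.
 */
static inline void aesenc_SB_SR_AK(AESState *ret, const AESState *st,
                                   const AESState *rk, bool be)
{
    if (HAVE_AES_ACCEL) {
        aesenc_SB_SR_AK_accel(ret, st, rk, be);  /* AESENCLAST path */
    } else {
        aesenc_SB_SR_AK_gen(ret, st, rk, be);    /* assumed C fallback */
    }
}
```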
| -rw-r--r-- | host/include/i386/host/cpuinfo.h | 1 |
| -rw-r--r-- | host/include/i386/host/crypto/aes-round.h | 152 |
| -rw-r--r-- | host/include/x86_64/host/crypto/aes-round.h | 1 |
| -rw-r--r-- | util/cpuinfo-i386.c | 3 |
4 files changed, 157 insertions, 0 deletions
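
One detail worth noting before the diff: the new `CPUINFO_AES` bit deliberately requires SSSE3 as well as AES-NI, because big-endian guest state is handled by reversing all 16 state bytes with PSHUFB. A standalone sketch of that reversal, assuming an SSSE3-capable host; the `bswap128` name is illustrative:

```c
/* Byte-reverse a 128-bit vector with PSHUFB, as aes_accel_bswap does.
 * _mm_set_epi8() takes arguments from byte 15 down to byte 0, so listing
 * 0,1,...,15 places source byte 15 in result byte 0, 14 in byte 1, and
 * so on: a big-endian <-> little-endian swap of the whole AES state. */
#include <stdio.h>
#include <immintrin.h>

__attribute__((target("ssse3")))
static __m128i bswap128(__m128i x)
{
    return _mm_shuffle_epi8(x, _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8,
                                            9, 10, 11, 12, 13, 14, 15));
}

int main(void)
{
    unsigned char in[16], out[16];
    for (int i = 0; i < 16; i++) {
        in[i] = i;
    }
    _mm_storeu_si128((__m128i *)out, bswap128(_mm_loadu_si128((__m128i *)in)));
    for (int i = 0; i < 16; i++) {
        printf("%d%c", out[i], i == 15 ? '\n' : ' ');  /* 15 14 ... 0 */
    }
    return 0;
}
```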
```diff
diff --git a/host/include/i386/host/cpuinfo.h b/host/include/i386/host/cpuinfo.h
index a653712..073d0a4 100644
--- a/host/include/i386/host/cpuinfo.h
+++ b/host/include/i386/host/cpuinfo.h
@@ -26,6 +26,7 @@
 #define CPUINFO_AVX512VBMI2     (1u << 15)
 #define CPUINFO_ATOMIC_VMOVDQA  (1u << 16)
 #define CPUINFO_ATOMIC_VMOVDQU  (1u << 17)
+#define CPUINFO_AES             (1u << 18)
 
 /* Initialized with a constructor. */
 extern unsigned cpuinfo;
diff --git a/host/include/i386/host/crypto/aes-round.h b/host/include/i386/host/crypto/aes-round.h
new file mode 100644
index 0000000..59a6413
--- /dev/null
+++ b/host/include/i386/host/crypto/aes-round.h
@@ -0,0 +1,152 @@
+/*
+ * x86 specific aes acceleration.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef X86_HOST_CRYPTO_AES_ROUND_H
+#define X86_HOST_CRYPTO_AES_ROUND_H
+
+#include "host/cpuinfo.h"
+#include <immintrin.h>
+
+#if defined(__AES__) && defined(__SSSE3__)
+# define HAVE_AES_ACCEL  true
+# define ATTR_AES_ACCEL
+#else
+# define HAVE_AES_ACCEL  likely(cpuinfo & CPUINFO_AES)
+# define ATTR_AES_ACCEL  __attribute__((target("aes,ssse3")))
+#endif
+
+static inline __m128i ATTR_AES_ACCEL
+aes_accel_bswap(__m128i x)
+{
+    return _mm_shuffle_epi8(x, _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8,
+                                            9, 10, 11, 12, 13, 14, 15));
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
+{
+    __m128i t = (__m128i)st->v;
+    __m128i z = _mm_setzero_si128();
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = _mm_aesdeclast_si128(t, z);
+        t = _mm_aesenc_si128(t, z);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesdeclast_si128(t, z);
+        t = _mm_aesenc_si128(t, z);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_SB_SR_AK_accel(AESState *ret, const AESState *st,
+                      const AESState *rk, bool be)
+{
+    __m128i t = (__m128i)st->v;
+    __m128i k = (__m128i)rk->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = _mm_aesenclast_si128(t, k);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesenclast_si128(t, k);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
+                         const AESState *rk, bool be)
+{
+    __m128i t = (__m128i)st->v;
+    __m128i k = (__m128i)rk->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = _mm_aesenc_si128(t, k);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesenc_si128(t, k);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
+{
+    __m128i t = (__m128i)st->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        t = _mm_aesimc_si128(t);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesimc_si128(t);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_AK_accel(AESState *ret, const AESState *st,
+                        const AESState *rk, bool be)
+{
+    __m128i t = (__m128i)st->v;
+    __m128i k = (__m128i)rk->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = _mm_aesdeclast_si128(t, k);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesdeclast_si128(t, k);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
+                            const AESState *rk, bool be)
+{
+    __m128i t = (__m128i)st->v;
+    __m128i k = (__m128i)rk->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = _mm_aesdeclast_si128(t, k);
+        t = _mm_aesimc_si128(t);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesdeclast_si128(t, k);
+        t = _mm_aesimc_si128(t);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
+                            const AESState *rk, bool be)
+{
+    __m128i t = (__m128i)st->v;
+    __m128i k = (__m128i)rk->v;
+
+    if (be) {
+        t = aes_accel_bswap(t);
+        k = aes_accel_bswap(k);
+        t = _mm_aesdec_si128(t, k);
+        t = aes_accel_bswap(t);
+    } else {
+        t = _mm_aesdec_si128(t, k);
+    }
+    ret->v = (AESStateVec)t;
+}
+
+#endif /* X86_HOST_CRYPTO_AES_ROUND_H */
diff --git a/host/include/x86_64/host/crypto/aes-round.h b/host/include/x86_64/host/crypto/aes-round.h
new file mode 100644
index 0000000..2773cc9
--- /dev/null
+++ b/host/include/x86_64/host/crypto/aes-round.h
@@ -0,0 +1 @@
+#include "host/include/i386/host/crypto/aes-round.h"
diff --git a/util/cpuinfo-i386.c b/util/cpuinfo-i386.c
index ab6143d..3a7b7e0 100644
--- a/util/cpuinfo-i386.c
+++ b/util/cpuinfo-i386.c
@@ -40,6 +40,9 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
         info |= (c & bit_MOVBE ? CPUINFO_MOVBE : 0);
         info |= (c & bit_POPCNT ? CPUINFO_POPCNT : 0);
 
+        /* Our AES support requires PSHUFB as well. */
+        info |= ((c & bit_AES) && (c & bit_SSSE3) ? CPUINFO_AES : 0);
+
         /* For AVX features, we must check available and usable. */
         if ((c & bit_AVX) && (c & bit_OSXSAVE)) {
             unsigned bv = xgetbv_low(0);
```
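
A note on the least obvious helper above: x86 has no instruction that performs MixColumns by itself, so `aesenc_MC_accel` composes two. AESDECLAST applies InvShiftRows and InvSubBytes (plus AddRoundKey), while AESENC applies ShiftRows, SubBytes, MixColumns and AddRoundKey; with an all-zero round key the key additions are no-ops and the inverse/forward S-box and row-shift steps cancel, leaving MixColumns alone. A self-contained check of that identity against the well-known FIPS-197 MixColumns test vector, assuming an AES-NI host; the `mix_columns` name is illustrative:

```c
#include <stdio.h>
#include <immintrin.h>

/* MixColumns only: undo ShiftRows/SubBytes with AESDECLAST, then let
 * AESENC redo them and add MixColumns; zero keys make AddRoundKey a no-op. */
__attribute__((target("aes")))
static __m128i mix_columns(__m128i s)
{
    __m128i z = _mm_setzero_si128();
    return _mm_aesenc_si128(_mm_aesdeclast_si128(s, z), z);
}

int main(void)
{
    /* Known answer: MixColumns maps column {db,13,53,45} to {8e,4d,a1,bc}.
     * The AES state is column-major in memory, so replicate the column
     * across all four columns of the state. */
    static const unsigned char col[4] = { 0xdb, 0x13, 0x53, 0x45 };
    unsigned char st[16], out[16];
    for (int i = 0; i < 16; i++) {
        st[i] = col[i % 4];
    }
    _mm_storeu_si128((__m128i *)out,
                     mix_columns(_mm_loadu_si128((const __m128i *)st)));
    for (int i = 0; i < 16; i++) {
        printf("%02x%c", out[i], i % 4 == 3 ? '\n' : ' ');
    }
    return 0;   /* expect four lines of: 8e 4d a1 bc */
}
```

The same zero-key composition is why `aesenc_MC_accel` takes no round-key argument, unlike the other hooks.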