author     Richard Henderson <richard.henderson@linaro.org>   2023-06-01 23:58:52 -0700
committer  Richard Henderson <richard.henderson@linaro.org>   2023-07-08 07:30:17 +0100
commit     d6a2443696c5b34fb20879e899bf823d6168b068 (patch)
tree       cbb20eedf46dec9d97f710e1c796bf0982ed0a7d /host
parent     28e91474ce558bca55c800e7977bab7c66a44abb (diff)
host/include/i386: Implement aes-round.h
Detect AES in cpuinfo; implement the accel hooks.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
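The cpuinfo probe itself lives outside the host/ directory and is therefore not part of the diffstat below. As a rough illustration only: AES-NI support is reported by CPUID leaf 1, ECX bit 25, and SSSE3 by ECX bit 9, so a detector could look like the sketch below. The helper name and placement are hypothetical, not this commit's code.

/* Hypothetical sketch only: how a constructor-time probe could set
 * CPUINFO_AES.  The real detection is in QEMU's cpuinfo code, which is
 * outside this host-limited diff. */
#include <cpuid.h>            /* bit_AES, bit_SSSE3, __get_cpuid() */
#include "host/cpuinfo.h"     /* CPUINFO_AES, cpuinfo */

static void probe_aes(void)   /* hypothetical helper name */
{
    unsigned a, b, c, d;

    if (__get_cpuid(1, &a, &b, &c, &d)) {
        /* The accel hooks need both AES-NI and SSSE3 (PSHUFB byte swaps). */
        if ((c & bit_AES) && (c & bit_SSSE3)) {
            cpuinfo |= CPUINFO_AES;
        }
    }
}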
Diffstat (limited to 'host')
-rw-r--r--   host/include/i386/host/cpuinfo.h               1
-rw-r--r--   host/include/i386/host/crypto/aes-round.h    152
-rw-r--r--   host/include/x86_64/host/crypto/aes-round.h    1
3 files changed, 154 insertions(+), 0 deletions(-)
diff --git a/host/include/i386/host/cpuinfo.h b/host/include/i386/host/cpuinfo.h
index a653712..073d0a4 100644
--- a/host/include/i386/host/cpuinfo.h
+++ b/host/include/i386/host/cpuinfo.h
@@ -26,6 +26,7 @@
#define CPUINFO_AVX512VBMI2     (1u << 15)
#define CPUINFO_ATOMIC_VMOVDQA  (1u << 16)
#define CPUINFO_ATOMIC_VMOVDQU  (1u << 17)
+#define CPUINFO_AES             (1u << 18)

/* Initialized with a constructor. */
extern unsigned cpuinfo;
diff --git a/host/include/i386/host/crypto/aes-round.h b/host/include/i386/host/crypto/aes-round.h
new file mode 100644
index 0000000..59a6413
--- /dev/null
+++ b/host/include/i386/host/crypto/aes-round.h
@@ -0,0 +1,152 @@
+/*
+ * x86 specific aes acceleration.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef X86_HOST_CRYPTO_AES_ROUND_H
+#define X86_HOST_CRYPTO_AES_ROUND_H
+
+#include "host/cpuinfo.h"
+#include <immintrin.h>
+
+#if defined(__AES__) && defined(__SSSE3__)
+# define HAVE_AES_ACCEL true
+# define ATTR_AES_ACCEL
+#else
+# define HAVE_AES_ACCEL likely(cpuinfo & CPUINFO_AES)
+# define ATTR_AES_ACCEL __attribute__((target("aes,ssse3")))
+#endif
+
+static inline __m128i ATTR_AES_ACCEL
+aes_accel_bswap(__m128i x)
+{
+ return _mm_shuffle_epi8(x, _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15));
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
+{
+ __m128i t = (__m128i)st->v;
+ __m128i z = _mm_setzero_si128();
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ t = _mm_aesdeclast_si128(t, z);
+ t = _mm_aesenc_si128(t, z);
+ t = aes_accel_bswap(t);
+ } else {
+ t = _mm_aesdeclast_si128(t, z);
+ t = _mm_aesenc_si128(t, z);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_SB_SR_AK_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ __m128i t = (__m128i)st->v;
+ __m128i k = (__m128i)rk->v;
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ k = aes_accel_bswap(k);
+ t = _mm_aesenclast_si128(t, k);
+ t = aes_accel_bswap(t);
+ } else {
+ t = _mm_aesenclast_si128(t, k);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ __m128i t = (__m128i)st->v;
+ __m128i k = (__m128i)rk->v;
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ k = aes_accel_bswap(k);
+ t = _mm_aesenc_si128(t, k);
+ t = aes_accel_bswap(t);
+ } else {
+ t = _mm_aesenc_si128(t, k);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
+{
+ __m128i t = (__m128i)st->v;
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ t = _mm_aesimc_si128(t);
+ t = aes_accel_bswap(t);
+ } else {
+ t = _mm_aesimc_si128(t);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_AK_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ __m128i t = (__m128i)st->v;
+ __m128i k = (__m128i)rk->v;
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ k = aes_accel_bswap(k);
+ t = _mm_aesdeclast_si128(t, k);
+ t = aes_accel_bswap(t);
+ } else {
+ t = _mm_aesdeclast_si128(t, k);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ __m128i t = (__m128i)st->v;
+ __m128i k = (__m128i)rk->v;
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ k = aes_accel_bswap(k);
+ t = _mm_aesdeclast_si128(t, k);
+ t = _mm_aesimc_si128(t);
+ t = aes_accel_bswap(t);
+ } else {
+ t = _mm_aesdeclast_si128(t, k);
+ t = _mm_aesimc_si128(t);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+static inline void ATTR_AES_ACCEL
+aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
+ const AESState *rk, bool be)
+{
+ __m128i t = (__m128i)st->v;
+ __m128i k = (__m128i)rk->v;
+
+ if (be) {
+ t = aes_accel_bswap(t);
+ k = aes_accel_bswap(k);
+ t = _mm_aesdec_si128(t, k);
+ t = aes_accel_bswap(t);
+ } else {
+ t = _mm_aesdec_si128(t, k);
+ }
+ ret->v = (AESStateVec)t;
+}
+
+#endif /* X86_HOST_CRYPTO_AES_ROUND_H */
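A note on aesenc_MC_accel above, since the composition is not obvious: x86 has no instruction that performs MixColumns in isolation, so the hook synthesizes it from two instructions with an all-zero round key. AESDECLAST applies InvShiftRows and InvSubBytes (the zero key add is a no-op), and the following AESENC applies ShiftRows, SubBytes, MixColumns and another zero key add. Because ShiftRows is a byte permutation and SubBytes acts per byte, the forward steps cancel the inverse steps exactly, leaving only MixColumns of the original state. An informal restatement of that identity:

/* Identity behind aesenc_MC_accel (zero round keys are no-ops):
 *   AESDECLAST(t, 0) = InvSubBytes(InvShiftRows(t))
 *   AESENC(x, 0)     = MixColumns(SubBytes(ShiftRows(x)))
 * so
 *   AESENC(AESDECLAST(t, 0), 0)
 *     = MixColumns(SubBytes(ShiftRows(InvSubBytes(InvShiftRows(t)))))
 *     = MixColumns(t)
 * since ShiftRows commutes with the per-byte substitutions, which then
 * cancel against their inverses.
 */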
diff --git a/host/include/x86_64/host/crypto/aes-round.h b/host/include/x86_64/host/crypto/aes-round.h
new file mode 100644
index 0000000..2773cc9
--- /dev/null
+++ b/host/include/x86_64/host/crypto/aes-round.h
@@ -0,0 +1 @@
+#include "host/include/i386/host/crypto/aes-round.h"