| field | value | date |
|---|---|---|
| author | Richard Earnshaw <rearnsha@arm.com> | 2021-11-01 13:23:26 +0000 |
| committer | Richard Earnshaw <rearnsha@arm.com> | 2022-01-20 11:15:22 +0000 |
| commit | 2078550a005f3fde4c331ad4b8452c963c4cdb9d | |
| tree | 4bbfc900fbfb7bf93d3bc8584e9231618fcab054 | |
| parent | bc13384e1956a9bc38b084f82e250743451aae61 | |
arm: suppress aes erratum when forwarding from aes
AES operations are commonly chained, and since the result of one AES
operation is never a 32-bit value, the forwarded result does not need
an additional mitigation instruction. We handle this common case by
adding patterns that match these chained forms directly.
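As a purely illustrative sketch (not part of the commit), the C fragment below shows the kind of chained AES sequence the new patterns are aimed at, written with the ACLE intrinsics from arm_neon.h. The helper name and the number of rounds are invented for the example, and building it for AArch32 assumes a crypto-capable FPU setting such as -mfpu=crypto-neon-fp-armv8. Every intermediate AESE/AESMC result is consumed by another AES instruction, so no mitigation instruction has to be inserted between the rounds.

```c
/* Illustrative only: a chained AES encryption sequence of the shape the
   new *_protected patterns are intended to match.  The function name and
   round structure are hypothetical, not taken from the GCC testsuite.  */
#include <arm_neon.h>

uint8x16_t
aes_three_rounds (uint8x16_t block, uint8x16_t k0, uint8x16_t k1, uint8x16_t k2)
{
  block = vaesmcq_u8 (vaeseq_u8 (block, k0));  /* AESE then AESMC: fusable pair.  */
  block = vaesmcq_u8 (vaeseq_u8 (block, k1));  /* Result feeds the next AESE.     */
  return vaeseq_u8 (block, k2);                /* Last AESE of the chain.         */
}
```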
gcc/ChangeLog:
* config/arm/crypto.md (crypto_<CRYPTO_AESMC:crypto_pattern>_protected):
New pattern.
(aarch32_crypto_aese_fused_protected): Likewise.
(aarch32_crypto_aesd_fused_protected): Likewise.
gcc/config/arm/crypto.md | 50
1 file changed, 50 insertions(+), 0 deletions(-)
diff --git a/gcc/config/arm/crypto.md b/gcc/config/arm/crypto.md
index fbee182..df85735 100644
--- a/gcc/config/arm/crypto.md
+++ b/gcc/config/arm/crypto.md
@@ -75,6 +75,20 @@
   [(set_attr "type" "neon_move_q")]
 )
 
+;; An AESMC operation can feed directly into a subsequent AES
+;; operation without needing mitigation.
+(define_insn "*crypto_<CRYPTO_AESMC:crypto_pattern>_protected"
+  [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
+        (unspec:<crypto_mode>
+         [(unspec:<crypto_mode>
+           [(match_operand:<crypto_mode> 1 "register_operand" "w")]
+           CRYPTO_AESMC)]
+         UNSPEC_AES_PROTECT))]
+  "TARGET_CRYPTO && fix_aes_erratum_1742098"
+  "<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q1"
+  [(set_attr "type" "<crypto_type>")]
+)
+
 ;; When AESE/AESMC fusion is enabled we really want to keep the two together
 ;; and enforce the register dependency without scheduling or register
 ;; allocation messing up the order or introducing moves inbetween.
@@ -95,6 +109,25 @@
   (set_attr "length" "8")]
 )
 
+;; And similarly when mitigation is enabled, but not needed in this
+;; case.
+(define_insn "*aarch32_crypto_aese_fused_protected"
+  [(set (match_operand:V16QI 0 "register_operand" "=w")
+        (unspec:V16QI
+         [(unspec:V16QI
+           [(unspec:V16QI [(xor:V16QI
+                            (match_operand:V16QI 1 "register_operand" "%0")
+                            (match_operand:V16QI 2 "register_operand" "w"))]
+             UNSPEC_AESE)]
+           UNSPEC_AESMC)]
+         UNSPEC_AES_PROTECT))]
+  "TARGET_CRYPTO && fix_aes_erratum_1742098
+   && arm_fusion_enabled_p (tune_params::FUSE_AES_AESMC)"
+  "aese.8\\t%q0, %q2\;aesmc.8\\t%q0, %q0"
+  [(set_attr "type" "crypto_aese")
+   (set_attr "length" "8")]
+)
+
 ;; When AESD/AESIMC fusion is enabled we really want to keep the two together
 ;; and enforce the register dependency without scheduling or register
 ;; allocation messing up the order or introducing moves inbetween.
@@ -115,6 +148,23 @@
   (set_attr "length" "8")]
 )
 
+(define_insn "*aarch32_crypto_aesd_fused_protected"
+  [(set (match_operand:V16QI 0 "register_operand" "=w")
+        (unspec:V16QI
+         [(unspec:V16QI
+           [(unspec:V16QI [(xor:V16QI
+                            (match_operand:V16QI 1 "register_operand" "%0")
+                            (match_operand:V16QI 2 "register_operand" "w"))]
+             UNSPEC_AESD)]
+           UNSPEC_AESIMC)]
+         UNSPEC_AES_PROTECT))]
+  "TARGET_CRYPTO && fix_aes_erratum_1742098
+   && arm_fusion_enabled_p (tune_params::FUSE_AES_AESMC)"
+  "aesd.8\\t%q0, %q2\;aesimc.8\\t%q0, %q0"
+  [(set_attr "type" "crypto_aese")
+   (set_attr "length" "8")]
+)
+
 (define_insn "crypto_<CRYPTO_BINARY:crypto_pattern>"
   [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
         (unspec:<crypto_mode>
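A similarly hypothetical sketch for the decryption side: an intrinsic chain of this shape is what *aarch32_crypto_aesd_fused_protected is meant to cover. Assuming the erratum fix is active and the tuned core enables AES/AESMC fusion, the template above keeps the aesd.8/aesimc.8 pair back to back, with no mitigation instruction between the rounds because each result feeds another AES instruction. The helper name below is invented for the example.

```c
/* Illustrative only: a chained AES decryption sequence of the shape the
   fused, protected AESD/AESIMC pattern is intended to match.  */
#include <arm_neon.h>

uint8x16_t
aes_dec_two_rounds (uint8x16_t block, uint8x16_t k0, uint8x16_t k1)
{
  block = vaesimcq_u8 (vaesdq_u8 (block, k0));  /* AESD then AESIMC: fusable pair.  */
  return vaesdq_u8 (block, k1);                 /* Result feeds the next AESD.      */
}
```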