| author    | Sylvia Taylor <sylvia.taylor@arm.com>            | 2019-07-18 16:02:05 +0000 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Kyrylo Tkachov <ktkachov@gcc.gnu.org>            | 2019-07-18 16:02:05 +0000 |
| commit    | b9a0100963b357e7e84084e45bbc51d6576410a4 (patch)  |                           |
| tree      | 806f623c6e01f54496eaf3c26999e573b7396818 /gcc     |                           |
| parent    | e38341a8e0c7f89eb2146feddea8c2f3bf25a331 (diff)   |                           |
[patch2/2][arm]: remove builtin expand for sha1
This patch removes the builtin expand handling for sha1h/c/m/p and
replaces it with define_expand patterns. This makes it more consistent
with how intrinsic implementations are handled elsewhere and cleans up
the custom sha1 code in arm_expand_unop_builtin and
arm_expand_ternop_builtin.
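For reference, here is a minimal usage sketch (not part of the patch) of the ACLE SHA1 intrinsics whose builtins go through these expand paths. The function name and the build flags mentioned in the comment are illustrative assumptions; the intrinsic signatures are the standard ones from arm_neon.h:

```c
/* Illustrative only: exercises the four SHA1 intrinsics whose builtins
   expand through the patterns touched by this patch.  Build for an Armv8-A
   target with the crypto extension, e.g.
   arm-linux-gnueabihf-gcc -O2 -march=armv8-a+crypto -mfpu=auto
   (flags are an assumption; adjust for your toolchain).  */
#include <arm_neon.h>

uint32x4_t
sha1_intrinsics_example (uint32x4_t abcd, uint32_t e, uint32x4_t wk)
{
  uint32_t e_rot = vsha1h_u32 (e);           /* expands via crypto_sha1h */
  uint32x4_t t = vsha1cq_u32 (abcd, e, wk);  /* crypto_sha1c */
  t = vsha1pq_u32 (t, e_rot, wk);            /* crypto_sha1p */
  t = vsha1mq_u32 (t, e_rot, wk);            /* crypto_sha1m */
  return t;
}
```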
2019-07-18 Sylvia Taylor <sylvia.taylor@arm.com>
* config/arm/arm-builtins.c
(arm_expand_ternop_builtin): Remove explicit sha1 builtin handling.
(arm_expand_unop_builtin): Likewise.
* config/arm/crypto.md
(crypto_sha1h): Convert from define_insn to define_expand.
(crypto_<crypto_pattern>): Likewise.
(crypto_sha1h_lb): New define_insn.
(crypto_<crypto_pattern>_lb): Likewise.
From-SVN: r273575
Diffstat (limited to 'gcc')
-rw-r--r-- | gcc/ChangeLog                 | 11
-rw-r--r-- | gcc/config/arm/arm-builtins.c | 36
-rw-r--r-- | gcc/config/arm/crypto.md      | 39
3 files changed, 46 insertions(+), 40 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 668dc40..7055ca4 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,16 @@
 2019-07-18  Sylvia Taylor  <sylvia.taylor@arm.com>
 
+        * config/arm/arm-builtins.c
+        (arm_expand_ternop_builtin): Remove explicit sha1 builtin handling.
+        (arm_expand_unop_builtin): Likewise.
+        * config/arm/crypto.md
+        (crypto_sha1h): Convert from define_insn to define_expand.
+        (crypto_<crypto_pattern>): Likewise.
+        (crypto_sha1h_lb): New define_insn.
+        (crypto_<crypto_pattern>_lb): Likewise.
+
+2019-07-18  Sylvia Taylor  <sylvia.taylor@arm.com>
+
         PR target/90317
         * config/arm/arm_neon.h
         (vsha1h_u32): Refactor.
diff --git a/gcc/config/arm/arm-builtins.c b/gcc/config/arm/arm-builtins.c
index 55bbb48..8f2c937 100644
--- a/gcc/config/arm/arm-builtins.c
+++ b/gcc/config/arm/arm-builtins.c
@@ -1993,25 +1993,12 @@ arm_expand_ternop_builtin (enum insn_code icode,
   rtx op0 = expand_normal (arg0);
   rtx op1 = expand_normal (arg1);
   rtx op2 = expand_normal (arg2);
-  rtx op3 = NULL_RTX;
 
-  /* The sha1c, sha1p, sha1m crypto builtins require a different vec_select
-     lane operand depending on endianness.  */
-  bool builtin_sha1cpm_p = false;
-
-  if (insn_data[icode].n_operands == 5)
-    {
-      gcc_assert (icode == CODE_FOR_crypto_sha1c
-                  || icode == CODE_FOR_crypto_sha1p
-                  || icode == CODE_FOR_crypto_sha1m);
-      builtin_sha1cpm_p = true;
-    }
 
   machine_mode tmode = insn_data[icode].operand[0].mode;
   machine_mode mode0 = insn_data[icode].operand[1].mode;
   machine_mode mode1 = insn_data[icode].operand[2].mode;
   machine_mode mode2 = insn_data[icode].operand[3].mode;
-
   if (VECTOR_MODE_P (mode0))
     op0 = safe_vector_operand (op0, mode0);
   if (VECTOR_MODE_P (mode1))
@@ -2034,13 +2021,8 @@ arm_expand_ternop_builtin (enum insn_code icode,
     op1 = copy_to_mode_reg (mode1, op1);
   if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
     op2 = copy_to_mode_reg (mode2, op2);
-  if (builtin_sha1cpm_p)
-    op3 = GEN_INT (TARGET_BIG_END ? 1 : 0);
 
-  if (builtin_sha1cpm_p)
-    pat = GEN_FCN (icode) (target, op0, op1, op2, op3);
-  else
-    pat = GEN_FCN (icode) (target, op0, op1, op2);
+  pat = GEN_FCN (icode) (target, op0, op1, op2);
   if (! pat)
     return 0;
   emit_insn (pat);
@@ -2096,16 +2078,8 @@ arm_expand_unop_builtin (enum insn_code icode,
   rtx pat;
   tree arg0 = CALL_EXPR_ARG (exp, 0);
   rtx op0 = expand_normal (arg0);
-  rtx op1 = NULL_RTX;
   machine_mode tmode = insn_data[icode].operand[0].mode;
   machine_mode mode0 = insn_data[icode].operand[1].mode;
-  bool builtin_sha1h_p = false;
-
-  if (insn_data[icode].n_operands == 3)
-    {
-      gcc_assert (icode == CODE_FOR_crypto_sha1h);
-      builtin_sha1h_p = true;
-    }
 
   if (! target
       || GET_MODE (target) != tmode
@@ -2121,13 +2095,9 @@ arm_expand_unop_builtin (enum insn_code icode,
       if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);
     }
-  if (builtin_sha1h_p)
-    op1 = GEN_INT (TARGET_BIG_END ? 1 : 0);
 
-  if (builtin_sha1h_p)
-    pat = GEN_FCN (icode) (target, op0, op1);
-  else
-    pat = GEN_FCN (icode) (target, op0);
+  pat = GEN_FCN (icode) (target, op0);
+
   if (! pat)
     return 0;
   emit_insn (pat);
diff --git a/gcc/config/arm/crypto.md b/gcc/config/arm/crypto.md
index 115c515..03596fe 100644
--- a/gcc/config/arm/crypto.md
+++ b/gcc/config/arm/crypto.md
@@ -109,13 +109,23 @@
 of the V4SI, adjusted for endianness. Required due to neon_vget_lane and
 neon_set_lane that change the element ordering in memory for big-endian.
 */
-(define_insn "crypto_sha1h"
+(define_expand "crypto_sha1h"
+  [(set (match_operand:V4SI 0 "register_operand")
+       (match_operand:V4SI 1 "register_operand"))]
+  "TARGET_CRYPTO"
+{
+  rtx op2 = GEN_INT (NEON_ENDIAN_LANE_N (V2SImode, 0));
+  emit_insn (gen_crypto_sha1h_lb (operands[0], operands[1], op2));
+  DONE;
+})
+
+(define_insn "crypto_sha1h_lb"
   [(set (match_operand:V4SI 0 "register_operand" "=w")
-       (unspec:V4SI
-         [(vec_select:SI
-           (match_operand:V4SI 1 "register_operand" "w")
-           (parallel [(match_operand:SI 2 "immediate_operand" "i")]))]
-         UNSPEC_SHA1H))]
+       (unspec:V4SI
+         [(vec_select:SI
+           (match_operand:V4SI 1 "register_operand" "w")
+           (parallel [(match_operand:SI 2 "immediate_operand" "i")]))]
+       UNSPEC_SHA1H))]
   "TARGET_CRYPTO && INTVAL (operands[2]) == NEON_ENDIAN_LANE_N (V2SImode, 0)"
   "sha1h.32\\t%q0, %q1"
   [(set_attr "type" "crypto_sha1_fast")]
@@ -135,7 +145,22 @@
 of the V4SI, adjusted for endianness. Required due to neon_vget_lane and
 neon_set_lane that change the element ordering in memory for big-endian.
 */
-(define_insn "crypto_<crypto_pattern>"
+(define_expand "crypto_<crypto_pattern>"
+  [(set (match_operand:V4SI 0 "register_operand")
+       (unspec:<crypto_mode>
+               [(match_operand:<crypto_mode> 1 "register_operand")
+                (match_operand:<crypto_mode> 2 "register_operand")
+                (match_operand:<crypto_mode> 3 "register_operand")]
+       CRYPTO_SELECTING))]
+  "TARGET_CRYPTO"
+{
+  rtx op4 = GEN_INT (NEON_ENDIAN_LANE_N (V2SImode, 0));
+  emit_insn (gen_crypto_<crypto_pattern>_lb
+            (operands[0], operands[1], operands[2], operands[3], op4));
+  DONE;
+})
+
+(define_insn "crypto_<crypto_pattern>_lb"
   [(set (match_operand:V4SI 0 "register_operand" "=w")
        (unspec:<crypto_mode>
                [(match_operand:<crypto_mode> 1 "register_operand" "0")