author     Richard Sandiford <richard.sandiford@arm.com>	2025-02-20 17:10:14 +0000
committer  Richard Sandiford <richard.sandiford@arm.com>	2025-02-20 17:10:14 +0000
commit     d7ff3142821bd114ba925cb5a3778d973fe5335e (patch)
tree       a025b58b74ce73feecf54c1dcdb30abcb8775e1e /gcc/config/aarch64
parent     2f03b10da878fe8365975f54b72ff5e717a295a9 (diff)
aarch64: Remove old aarch64_expand_sve_vec_cmp_float code
While looking at PR118956, I noticed that we had some dead code
left over after the removal of the vcond patterns. The can_invert_p
path is no longer used.
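
For context, the interface change amounts to the following before/after sketch (reconstructed from the hunks below; the commentary is mine, not part of the patch):

/* Old interface: with CAN_INVERT_P true, the expander was allowed to
   emit the cheaper inverted comparison and return true, leaving the
   caller (the since-removed vcond pattern) to consume the inverted
   predicate.  */
bool aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code,
				       rtx op0, rtx op1, bool can_invert_p);

/* New interface: the sole remaining caller, vec_cmp<mode><vpred>, always
   wants the non-inverted result, so the expander emits it directly and
   has nothing to report back.  */
void aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code,
				       rtx op0, rtx op1);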
gcc/
* config/aarch64/aarch64-protos.h (aarch64_expand_sve_vec_cmp_float):
Remove can_invert_p argument and change return type to void.
* config/aarch64/aarch64.cc (aarch64_expand_sve_vec_cmp_float):
Likewise.
* config/aarch64/aarch64-sve.md (vec_cmp<mode><vpred>): Update call
accordingly.
Diffstat (limited to 'gcc/config/aarch64')
-rw-r--r--  gcc/config/aarch64/aarch64-protos.h |  2
-rw-r--r--  gcc/config/aarch64/aarch64-sve.md   |  2
-rw-r--r--  gcc/config/aarch64/aarch64.cc       | 36
3 files changed, 11 insertions, 29 deletions
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 4235f4a..0291a8a 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -1098,7 +1098,7 @@ void aarch64_finish_ldpstp_peephole (rtx *, bool,
 				    enum rtx_code = (enum rtx_code)0);
 
 void aarch64_expand_sve_vec_cmp_int (rtx, rtx_code, rtx, rtx);
-bool aarch64_expand_sve_vec_cmp_float (rtx, rtx_code, rtx, rtx, bool);
+void aarch64_expand_sve_vec_cmp_float (rtx, rtx_code, rtx, rtx);
 
 bool aarch64_prepare_sve_int_fma (rtx *, rtx_code);
 bool aarch64_prepare_sve_cond_int_fma (rtx *, rtx_code);
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index e975286..a93bc46 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -8495,7 +8495,7 @@
   "TARGET_SVE"
   {
     aarch64_expand_sve_vec_cmp_float (operands[0], GET_CODE (operands[1]),
-				      operands[2], operands[3], false);
+				      operands[2], operands[3]);
     DONE;
   }
 )
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index f5f23f6..fe76730 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -26877,14 +26877,10 @@ aarch64_emit_sve_invert_fp_cond (rtx target, rtx_code code, rtx pred,
 
 /* Expand an SVE floating-point comparison using the SVE equivalent of:
 
-     (set TARGET (CODE OP0 OP1))
-
-   If CAN_INVERT_P is true, the caller can also handle inverted results;
-   return true if the result is in fact inverted.  */
+     (set TARGET (CODE OP0 OP1)).  */
 
-bool
-aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code,
-				  rtx op0, rtx op1, bool can_invert_p)
+void
+aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code, rtx op0, rtx op1)
 {
   machine_mode pred_mode = GET_MODE (target);
   machine_mode data_mode = GET_MODE (op0);
@@ -26902,16 +26898,14 @@ aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code,
     case GE:
     case EQ:
     case NE:
-      {
-	/* There is native support for the comparison.  */
-	aarch64_emit_sve_fp_cond (target, code, ptrue, true, op0, op1);
-	return false;
-      }
+      /* There is native support for the comparison.  */
+      aarch64_emit_sve_fp_cond (target, code, ptrue, true, op0, op1);
+      return;
 
     case LTGT:
       /* This is a trapping operation (LT or GT).  */
       aarch64_emit_sve_or_fp_conds (target, LT, GT, ptrue, true, op0, op1);
-      return false;
+      return;
 
     case UNEQ:
       if (!flag_trapping_math)
@@ -26920,7 +26914,7 @@ aarch64_expand_sve_vec_cmp_float (rtx target, rtx_code code,
 	  op1 = force_reg (data_mode, op1);
 	  aarch64_emit_sve_or_fp_conds (target, UNORDERED, EQ,
 					ptrue, true, op0, op1);
-	  return false;
+	  return;
 	}
       /* fall through */
     case UNLT:
@@ -26941,15 +26935,9 @@
 	    code = NE;
 	  else
 	    code = reverse_condition_maybe_unordered (code);
-	  if (can_invert_p)
-	    {
-	      aarch64_emit_sve_fp_cond (target, code,
-					ordered, false, op0, op1);
-	      return true;
-	    }
 	  aarch64_emit_sve_invert_fp_cond (target, code,
 					   ordered, false, op0, op1);
-	  return false;
+	  return;
 	}
       break;
 
@@ -26964,13 +26952,7 @@
 
   /* There is native support for the inverse comparison.  */
   code = reverse_condition_maybe_unordered (code);
-  if (can_invert_p)
-    {
-      aarch64_emit_sve_fp_cond (target, code, ptrue, true, op0, op1);
-      return true;
-    }
   aarch64_emit_sve_invert_fp_cond (target, code, ptrue, true, op0, op1);
-  return false;
 }
 
 /* Return true if:
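
As a side note on why the two fallback strategies visible above are sound, here is a standalone scalar model in plain C++ (illustrative only; these helpers are stand-ins for the SVE predicate operations, not GCC code):

#include <cassert>
#include <cmath>

/* Model of the aarch64_emit_sve_or_fp_conds path: LTGT is the OR of two
   native comparisons, LT and GT.  Both are false on NaN input, so the
   OR is too, matching LTGT's unordered-is-false semantics.  */
static bool model_ltgt (double a, double b) { return a < b || a > b; }

/* Model of the aarch64_emit_sve_invert_fp_cond path: UNGE (unordered
   or >=) is emitted as the inverse of its reversed condition, ordered
   LT.  An ordered < is false whenever either operand is NaN, so its
   complement is true exactly when the operands are unordered or
   a >= b.  */
static bool model_unge (double a, double b) { return !(a < b); }

int main ()
{
  double nan = std::nan ("");
  assert (model_ltgt (1.0, 2.0) && model_ltgt (2.0, 1.0));
  assert (!model_ltgt (1.0, 1.0) && !model_ltgt (nan, 1.0));
  assert (model_unge (2.0, 1.0) && model_unge (nan, 1.0));
  assert (!model_unge (1.0, 2.0));
  return 0;
}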