author     Kyrylo Tkachov <kyrylo.tkachov@arm.com>  2015-04-30 13:36:22 +0000
committer  Kyrylo Tkachov <ktkachov@gcc.gnu.org>    2015-04-30 13:36:22 +0000
commit     d2ac256bc78dd0360c2f56fc1471c118689216bf (patch)
tree       ab33f1979b16158007618ce80acd46a1b3b634ef
parent     ec3fba517279402b4686129c23af1d34f23b5ce0 (diff)
[AArch64] Properly cost MNEG/[SU]MNEGL patterns
	* config/aarch64/aarch64.c (aarch64_rtx_mult_cost): Handle MNEG
	and [SU]MNEGL patterns.

From-SVN: r222627
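For context, a small C example (not part of the commit) of source patterns the AArch64 backend is typically expected to lower to the instructions this patch costs; the exact instruction selection noted in the comments is an assumption about combine behaviour at -O2, not something stated in the patch:

/* Hypothetical illustration: multiply-negate forms on AArch64.  */
long
mneg_example (long a, long b)
{
  return -(a * b);                      /* expected: MNEG  x0, x0, x1  */
}

long
smnegl_example (int a, int b)
{
  return -((long) a * b);               /* expected: SMNEGL x0, w0, w1 */
}

unsigned long
umnegl_example (unsigned int a, unsigned int b)
{
  return -((unsigned long) a * b);      /* expected: UMNEGL x0, w0, w1 */
}

In RTL these reach aarch64_rtx_mult_cost as a MULT with a NEG on one operand, which is exactly the shape the new hunk below strips and marks as a compound operation.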
-rw-r--r--  gcc/ChangeLog                 |  5
-rw-r--r--  gcc/config/aarch64/aarch64.c  | 15
2 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 4eacfc9..8cf8854 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,10 @@
2015-04-30 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+ * config/aarch64/aarch64.c (aarch64_rtx_mult_cost): Handle MNEG
+ and [SU]MNEGL patterns.
+
+2015-04-30 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
* config/aarch64/aarch64.c (aarch64_shift_p): New function.
(aarch64_rtx_mult_cost): Update comment to reflect that it also handles
combined arithmetic-shift ops. Properly handle all shift and extend
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 3be7b41..7d7f890 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -5229,6 +5229,15 @@ aarch64_rtx_mult_cost (rtx x, int code, int outer, bool speed)
return cost;
}
+ /* MNEG or [US]MNEGL. Extract the NEG operand and indicate that it's a
+ compound and let the below cases handle it. After all, MNEG is a
+ special-case alias of MSUB. */
+ if (GET_CODE (op0) == NEG)
+ {
+ op0 = XEXP (op0, 0);
+ compound_p = true;
+ }
+
/* Integer multiplies or FMAs have zero/sign extending variants. */
if ((GET_CODE (op0) == ZERO_EXTEND
&& GET_CODE (op1) == ZERO_EXTEND)
@@ -5241,7 +5250,7 @@ aarch64_rtx_mult_cost (rtx x, int code, int outer, bool speed)
if (speed)
{
if (compound_p)
- /* MADD/SMADDL/UMADDL. */
+ /* SMADDL/UMADDL/UMSUBL/SMSUBL. */
cost += extra_cost->mult[0].extend_add;
else
/* MUL/SMULL/UMULL. */
@@ -5251,7 +5260,7 @@ aarch64_rtx_mult_cost (rtx x, int code, int outer, bool speed)
return cost;
}
- /* This is either an integer multiply or an FMA. In both cases
+ /* This is either an integer multiply or a MADD. In both cases
we want to recurse and cost the operands. */
cost += rtx_cost (op0, MULT, 0, speed)
+ rtx_cost (op1, MULT, 1, speed);
@@ -5259,7 +5268,7 @@ aarch64_rtx_mult_cost (rtx x, int code, int outer, bool speed)
if (speed)
{
if (compound_p)
- /* MADD. */
+ /* MADD/MSUB. */
cost += extra_cost->mult[mode == DImode].add;
else
/* MUL. */
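As a rough mental model of the control flow after this patch (not GCC code: the struct and function below are simplified stand-ins for the extra_cost tables and aarch64_rtx_mult_cost, written only to show how the NEG case feeds the existing compound paths):

#include <stdbool.h>

/* Simplified sketch of the multiply-cost decision; the field names echo the
   cost-table members used in the diff, but this is not the real GCC type.  */
struct mult_cost_sketch { int simple, extend, add, extend_add; };

static int
mult_extra_cost (const struct mult_cost_sketch *c, bool negated_operand,
                 bool extended_operands, bool fused_with_add)
{
  /* A NEG on a multiply operand (MNEG/[SU]MNEGL) is treated as the
     compound, MSUB-like form; this is what the new hunk adds.  */
  bool compound_p = negated_operand || fused_with_add;

  if (extended_operands)
    /* SMADDL/UMADDL/SMSUBL/UMSUBL versus SMULL/UMULL.  */
    return compound_p ? c->extend_add : c->extend;

  /* MADD/MSUB versus plain MUL.  */
  return compound_p ? c->add : c->simple;
}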