author     Bin Cheng <bin.cheng@arm.com>    2015-12-04 03:26:19 +0000
committer  Bin Cheng <amker@gcc.gnu.org>    2015-12-04 03:26:19 +0000
commit     e8426e0a22212f9b355364186c9d1748a38b0549 (patch)
tree       db11a2a168f632b1beed0eba339c7cb492f9b486 /gcc
parent     72cc0e580391b320b96f9fbdaf1c26f905a4058f (diff)
aarch64.c (aarch64_legitimize_address): legitimize address expressions like Ra + Rb + CONST and Ra + Rb<<SCALE + CONST.
	* config/aarch64/aarch64.c (aarch64_legitimize_address): legitimize
	address expressions like Ra + Rb + CONST and Ra + Rb<<SCALE + CONST.

Co-Authored-By: Jiong Wang <jiong.wang@arm.com>

From-SVN: r231244
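As a rough illustration (not part of the original commit), the kind of C source that tends to produce such address expressions is a loop reading a field at a constant offset inside an array element; the struct, function name and constants below are hypothetical:

/* Hypothetical example: the access a[i].y is computed through an address
   of the form Ra + Rb<<SCALE + CONST, where Ra holds the base of `a',
   Rb holds a value derived from the loop index i, SCALE matches the
   access size and CONST is the offset of field `y' within struct elem.  */
struct elem { long x; long y; };

long
sum_y (struct elem *a, int n)
{
  long s = 0;
  for (int i = 0; i < n; i++)
    s += a[i].y;
  return s;
}

With this patch, when Ra + Rb<<SCALE is itself a legitimate address the CONST is folded into a new base (Rt = Ra + CONST) so that its computation is a loop invariant and can be moved out of the loop, as the comments in the hunk below explain.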
Diffstat (limited to 'gcc')
-rw-r--r--	gcc/ChangeLog	6
-rw-r--r--	gcc/config/aarch64/aarch64.c	64
2 files changed, 69 insertions(+), 1 deletion(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 900119a..977f7ae 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2015-12-04 Bin Cheng <bin.cheng@arm.com>
+ Jiong Wang <jiong.wang@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_legitimize_address): legitimize
+ address expressions like Ra + Rb + CONST and Ra + Rb<<SCALE + CONST.
+
2015-12-03 Jan Hubicka <hubicka@ucw.cz>
* alias.c (alias_set_subset_of, alias_sets_must_conflict_p)
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index bfbfc2a..191ad6d 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -4823,13 +4823,75 @@ aarch64_legitimize_address (rtx x, rtx /* orig_x */, machine_mode mode)
We try to pick as large a range for the offset as possible to
maximize the chance of a CSE. However, for aligned addresses
we limit the range to 4k so that structures with different sized
- elements are likely to use the same base. */
+ elements are likely to use the same base. We need to be careful
+ not to split a CONST for some forms of address expression, otherwise
+ it will generate sub-optimal code. */
if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
{
HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
HOST_WIDE_INT base_offset;
+ if (GET_CODE (XEXP (x, 0)) == PLUS)
+ {
+ rtx op0 = XEXP (XEXP (x, 0), 0);
+ rtx op1 = XEXP (XEXP (x, 0), 1);
+
+ /* Address expressions of the form Ra + Rb + CONST.
+
+ If CONST is within the range supported by the addressing
+ mode "reg+offset", do not split CONST and use the
+ sequence
+ Rt = Ra + Rb;
+ addr = Rt + CONST. */
+ if (REG_P (op0) && REG_P (op1))
+ {
+ machine_mode addr_mode = GET_MODE (x);
+ rtx base = gen_reg_rtx (addr_mode);
+ rtx addr = plus_constant (addr_mode, base, offset);
+
+ if (aarch64_legitimate_address_hook_p (mode, addr, false))
+ {
+ emit_insn (gen_adddi3 (base, op0, op1));
+ return addr;
+ }
+ }
+ /* Address expressions of the form Ra + Rb<<SCALE + CONST.
+
+ If Reg + Rb<<SCALE is a valid address expression, do not
+ split CONST and use the sequence
+ Rc = CONST;
+ Rt = Ra + Rc;
+ addr = Rt + Rb<<SCALE.
+
+	     Here we split CONST out of the memory reference because:
+	     a) We depend on GIMPLE optimizers to pick up common
+		subexpressions involving the scaling operation.
+	     b) The index Rb is likely a loop iv; it's better to split
+		the CONST so that computation of the new base Rt is a loop
+		invariant and can be moved out of the loop.  This is more
+		important when the original base Ra is sfp related.  */
+ else if (REG_P (op0) || REG_P (op1))
+ {
+ machine_mode addr_mode = GET_MODE (x);
+ rtx base = gen_reg_rtx (addr_mode);
+
+	      /* Switch to make sure that the register is in op0.  */
+ if (REG_P (op1))
+ std::swap (op0, op1);
+
+ rtx addr = gen_rtx_PLUS (addr_mode, op1, base);
+
+ if (aarch64_legitimate_address_hook_p (mode, addr, false))
+ {
+ base = force_operand (plus_constant (addr_mode,
+ op0, offset),
+ NULL_RTX);
+ return gen_rtx_PLUS (addr_mode, op1, base);
+ }
+ }
+ }
+
/* Does it look like we'll need a load/store-pair operation? */
if (GET_MODE_SIZE (mode) > 16
|| mode == TImode)