author     Eric Botcazou <ebotcazou@adacore.com>   2017-01-11 11:27:43 +0000
committer  Eric Botcazou <ebotcazou@gcc.gnu.org>   2017-01-11 11:27:43 +0000
commit     849fccf8312f734dddf4e3ea84eeabd2e243a10d
tree       293439ba584fbb7053bd43b36c0c1d62e28925c7 /gcc/lra-constraints.c
parent     e325277522ec7a72286f1faa5bfb47f4b4be3d81
re PR rtl-optimization/79032 (unaligned memory access generated with LRA and optimization)
	PR rtl-optimization/79032
	* lra-constraints.c (simplify_operand_subreg): In the MEM case, test
	the alignment of the adjusted memory reference against that of MODE,
	instead of the alignment of the original memory reference.

From-SVN: r244311
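The failure mode, in brief: the memory reference substituted for a paradoxical subreg is accessed at an adjusted address, and that adjustment can lower the known alignment, so only the adjusted reference's alignment is meaningful for the wider outer mode. A hypothetical scenario (the modes and alignments are assumed for illustration, not taken from the PR): the original mem:SI is 64-bit aligned, but the adjusted DImode reference covering the wider access is only 32-bit aligned; the old test of MEM_ALIGN (reg) saw no alignment problem and let LRA emit an unaligned 64-bit access on a strict-alignment target. A standalone model of the corrected test follows the diff below.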
Diffstat (limited to 'gcc/lra-constraints.c')
 gcc/lra-constraints.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/gcc/lra-constraints.c b/gcc/lra-constraints.c
index 5ada67a..7b0d2f4 100644
--- a/gcc/lra-constraints.c
+++ b/gcc/lra-constraints.c
@@ -1505,15 +1505,15 @@ simplify_operand_subreg (int nop, machine_mode reg_mode)
 					  MEM_ADDR_SPACE (subst))))
 	{
 	  /* If we change the address for a paradoxical subreg of memory, the
-	     address might violate the necessary alignment or the access might
-	     be slow.  So take this into consideration.  We need not worry
+	     new address might violate the necessary alignment or the access
+	     might be slow; take this into consideration.  We need not worry
 	     about accesses beyond allocated memory for paradoxical memory
 	     subregs as we don't substitute such equiv memory (see processing
 	     equivalences in function lra_constraints) and because for spilled
 	     pseudos we allocate stack memory enough for the biggest
 	     corresponding paradoxical subreg.  */
-	  if (!(MEM_ALIGN (reg) < GET_MODE_ALIGNMENT (mode)
-		&& SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (reg)))
+	  if (!(MEM_ALIGN (subst) < GET_MODE_ALIGNMENT (mode)
+		&& SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (subst)))
 	      || (MEM_ALIGN (reg) < GET_MODE_ALIGNMENT (innermode)
 		  && SLOW_UNALIGNED_ACCESS (innermode, MEM_ALIGN (reg))))
 	    return true;
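
Paraphrased outside GCC, the fixed condition keeps the substitution only when the adjusted memory reference is acceptably aligned for the outer mode, or when the original reference was already a slow unaligned access in the inner mode, so nothing is lost by substituting. Below is a standalone, compilable model of that predicate; the function and parameter names are illustrative stand-ins, and the strict-alignment behavior modeled for SLOW_UNALIGNED_ACCESS is an assumption for the sketch, not the definition of the GCC macro.

    #include <stdbool.h>

    /* Alignments are in bits, as with GCC's MEM_ALIGN and
       GET_MODE_ALIGNMENT.  Models SLOW_UNALIGNED_ACCESS on a
       strict-alignment target: any under-aligned access is slow.  */
    static bool
    slow_unaligned_access (unsigned int mode_align, unsigned int mem_align)
    {
      return mem_align < mode_align;
    }

    /* Model of the fixed guard: allow the substitution unless it would
       turn the access into a slow unaligned one in the outer mode,
       except when the original reference was already slow in the inner
       mode, in which case the substitution cannot make things worse.  */
    static bool
    substitution_ok (unsigned int subst_align, unsigned int outer_mode_align,
                     unsigned int reg_align, unsigned int inner_mode_align)
    {
      return !(subst_align < outer_mode_align
               && slow_unaligned_access (outer_mode_align, subst_align))
             || (reg_align < inner_mode_align
                 && slow_unaligned_access (inner_mode_align, reg_align));
    }

With the numbers from the scenario sketched above, substitution_ok (32, 64, 64, 32) is false, so the substitution is rejected; the pre-patch code tested reg_align rather than subst_align in the first clause and would have answered true, producing the unaligned access reported in the PR.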