path: root/gcc/expr.c
author     Richard Sandiford <richard.sandiford@linaro.org>  2017-09-12 13:27:55 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>  2017-09-12 13:27:55 +0000
commit     e0bd6c9f0aa67d88bbb20019362a4572fc5fac3c (patch)
tree       3ca0e90f453bcc3361eb758707d8da86a656477f /gcc/expr.c
parent     41defab318e4b5d8b87ba2b3512b02cb49c748a9 (diff)
Turn SLOW_UNALIGNED_ACCESS into a target hook
2017-09-12  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* defaults.h (SLOW_UNALIGNED_ACCESS): Delete.
	* target.def (slow_unaligned_access): New hook.
	* targhooks.h (default_slow_unaligned_access): Declare.
	* targhooks.c (default_slow_unaligned_access): New function.
	* doc/tm.texi.in (SLOW_UNALIGNED_ACCESS): Replace with...
	(TARGET_SLOW_UNALIGNED_ACCESS): ...this.
	* doc/tm.texi: Regenerate.
	* config/alpha/alpha.h (SLOW_UNALIGNED_ACCESS): Delete.
	* config/arm/arm.h (SLOW_UNALIGNED_ACCESS): Delete.
	* config/i386/i386.h (SLOW_UNALIGNED_ACCESS): Delete commented-out
	definition.
	* config/powerpcspe/powerpcspe.h (SLOW_UNALIGNED_ACCESS): Delete.
	* config/powerpcspe/powerpcspe.c (TARGET_SLOW_UNALIGNED_ACCESS):
	Redefine.
	(rs6000_slow_unaligned_access): New function.
	(rs6000_emit_move): Use it instead of SLOW_UNALIGNED_ACCESS.
	(expand_block_compare): Likewise.
	(expand_strn_compare): Likewise.
	(rs6000_rtx_costs): Likewise.
	* config/riscv/riscv.h (SLOW_UNALIGNED_ACCESS): Delete.
	(riscv_slow_unaligned_access): Likewise.
	* config/riscv/riscv.c (riscv_slow_unaligned_access): Rename to...
	(riscv_slow_unaligned_access_p): ...this and make static.
	(riscv_option_override): Update accordingly.
	(riscv_slow_unaligned_access): New function.
	(TARGET_SLOW_UNALIGNED_ACCESS): Redefine.
	* config/rs6000/rs6000.h (SLOW_UNALIGNED_ACCESS): Delete.
	* config/rs6000/rs6000.c (TARGET_SLOW_UNALIGNED_ACCESS): Redefine.
	(rs6000_slow_unaligned_access): New function.
	(rs6000_emit_move): Use it instead of SLOW_UNALIGNED_ACCESS.
	(rs6000_rtx_costs): Likewise.
	* config/rs6000/rs6000-string.c (expand_block_compare)
	(expand_strn_compare): Use targetm.slow_unaligned_access instead
	of SLOW_UNALIGNED_ACCESS.
	* config/tilegx/tilegx.h (SLOW_UNALIGNED_ACCESS): Delete.
	* config/tilepro/tilepro.h (SLOW_UNALIGNED_ACCESS): Delete.
	* calls.c (expand_call): Use targetm.slow_unaligned_access instead
	of SLOW_UNALIGNED_ACCESS.
	* expmed.c (simple_mem_bitfield_p): Likewise.
	* expr.c (alignment_for_piecewise_move): Likewise.
	(emit_group_load_1): Likewise.
	(emit_group_store): Likewise.
	(copy_blkmode_from_reg): Likewise.
	(emit_push_insn): Likewise.
	(expand_assignment): Likewise.
	(store_field): Likewise.
	(expand_expr_real_1): Likewise.
	* gimple-fold.c (gimple_fold_builtin_memory_op): Likewise.
	* lra-constraints.c (simplify_operand_subreg): Likewise.
	* stor-layout.c (bit_field_mode_iterator::next_mode): Likewise.
	* gimple-ssa-store-merging.c: Likewise in block comment at start
	of file.
	* tree-ssa-strlen.c: Include target.h.
	(handle_builtin_memcmp): Use targetm.slow_unaligned_access instead
	of SLOW_UNALIGNED_ACCESS.
	* system.h (SLOW_UNALIGNED_ACCESS): Poison.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r252009
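For readers unfamiliar with the hook mechanism, the sketch below outlines the shape of the conversion. It is a minimal illustration under the assumptions stated in its comments, not a verbatim copy of the patch: the default implementation takes the same (mode, alignment) pair as the old macro and falls back to STRICT_ALIGNMENT, matching the old defaults.h fallback, while each call site swaps the poisoned macro for targetm.slow_unaligned_access.

    /* Illustrative sketch only, assuming the hook signature added in
       target.def; not copied verbatim from the patch.  */

    /* Default for targets that do not override the hook: unaligned accesses
       are considered "slow" exactly when the target requires strict
       alignment, which is what the old defaults.h definition of
       SLOW_UNALIGNED_ACCESS expanded to.  */
    bool
    default_slow_unaligned_access (machine_mode, unsigned int)
    {
      return STRICT_ALIGNMENT;
    }

    /* Call sites change from the macro form ...

         if (SLOW_UNALIGNED_ACCESS (mode, align))
           ...

       ... to the hook form, as in the expr.c hunks below:

         if (targetm.slow_unaligned_access (mode, align))
           ...

       Targets such as rs6000 and riscv install their own predicate by
       redefining TARGET_SLOW_UNALIGNED_ACCESS.  */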
Diffstat (limited to 'gcc/expr.c')
-rw-r--r--	gcc/expr.c	19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/gcc/expr.c b/gcc/expr.c
index c97f4a6..989badc 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -730,7 +730,7 @@ alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align)
{
tmode = mode_iter.require ();
if (GET_MODE_SIZE (tmode) > max_pieces
- || SLOW_UNALIGNED_ACCESS (tmode, align))
+ || targetm.slow_unaligned_access (tmode, align))
break;
xmode = tmode;
}
@@ -2179,7 +2179,7 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
/* Optimize the access just a bit. */
if (MEM_P (src)
- && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (src))
+ && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src))
|| MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
&& bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
&& bytelen == GET_MODE_SIZE (mode))
@@ -2584,7 +2584,7 @@ emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize)
/* Optimize the access just a bit. */
else if (MEM_P (dest)
- && (!SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (dest))
+ && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (dest))
|| MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
&& bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
&& bytelen == GET_MODE_SIZE (mode))
@@ -2653,7 +2653,7 @@ copy_blkmode_from_reg (rtx target, rtx srcreg, tree type)
/* We can use a single move if we have an exact mode for the size. */
else if (MEM_P (target)
- && (!SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (target))
+ && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (target))
|| MEM_ALIGN (target) >= GET_MODE_ALIGNMENT (mode))
&& bytes == GET_MODE_SIZE (mode))
{
@@ -4348,7 +4348,7 @@ emit_push_insn (rtx x, machine_mode mode, tree type, rtx size,
/* Here we avoid the case of a structure whose weak alignment
forces many pushes of a small amount of data,
and such small pushes do rounding that causes trouble. */
- && ((! SLOW_UNALIGNED_ACCESS (word_mode, align))
+ && ((!targetm.slow_unaligned_access (word_mode, align))
|| align >= BIGGEST_ALIGNMENT
|| (PUSH_ROUNDING (align / BITS_PER_UNIT)
== (align / BITS_PER_UNIT)))
@@ -4947,7 +4947,7 @@ expand_assignment (tree to, tree from, bool nontemporal)
< GET_MODE_ALIGNMENT (mode))
&& (((icode = optab_handler (movmisalign_optab, mode))
!= CODE_FOR_nothing)
- || SLOW_UNALIGNED_ACCESS (mode, align)))
+ || targetm.slow_unaligned_access (mode, align)))
{
rtx reg, mem;
@@ -6783,7 +6783,7 @@ store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
|| (mode != BLKmode
&& ((((MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode))
|| bitpos % GET_MODE_ALIGNMENT (mode))
- && SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (target)))
+ && targetm.slow_unaligned_access (mode, MEM_ALIGN (target)))
|| (bitpos % BITS_PER_UNIT != 0)))
|| (bitsize >= 0 && mode != BLKmode
&& GET_MODE_BITSIZE (mode) > bitsize)
@@ -10229,7 +10229,7 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
expand_insn (icode, 2, ops);
temp = ops[0].value;
}
- else if (SLOW_UNALIGNED_ACCESS (mode, align))
+ else if (targetm.slow_unaligned_access (mode, align))
temp = extract_bit_field (temp, GET_MODE_BITSIZE (mode),
0, TYPE_UNSIGNED (TREE_TYPE (exp)),
(modifier == EXPAND_STACK_PARM
@@ -10663,7 +10663,8 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
&& ((modifier == EXPAND_CONST_ADDRESS
|| modifier == EXPAND_INITIALIZER)
? STRICT_ALIGNMENT
- : SLOW_UNALIGNED_ACCESS (mode1, MEM_ALIGN (op0))))
+ : targetm.slow_unaligned_access (mode1,
+ MEM_ALIGN (op0))))
|| (bitpos % BITS_PER_UNIT != 0)))
/* If the type and the field are a constant size and the
size of the type isn't the same size as the bitfield,