path: root/gcc/config
author    Richard Sandiford <richard.sandiford@linaro.org>  2017-09-12 13:27:55 +0000
committer Richard Sandiford <rsandifo@gcc.gnu.org>          2017-09-12 13:27:55 +0000
commit    e0bd6c9f0aa67d88bbb20019362a4572fc5fac3c (patch)
tree      3ca0e90f453bcc3361eb758707d8da86a656477f /gcc/config
parent    41defab318e4b5d8b87ba2b3512b02cb49c748a9 (diff)
Turn SLOW_UNALIGNED_ACCESS into a target hook
2017-09-12  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* defaults.h (SLOW_UNALIGNED_ACCESS): Delete.
	* target.def (slow_unaligned_access): New hook.
	* targhooks.h (default_slow_unaligned_access): Declare.
	* targhooks.c (default_slow_unaligned_access): New function.
	* doc/tm.texi.in (SLOW_UNALIGNED_ACCESS): Replace with...
	(TARGET_SLOW_UNALIGNED_ACCESS): ...this.
	* doc/tm.texi: Regenerate.
	* config/alpha/alpha.h (SLOW_UNALIGNED_ACCESS): Delete.
	* config/arm/arm.h (SLOW_UNALIGNED_ACCESS): Delete.
	* config/i386/i386.h (SLOW_UNALIGNED_ACCESS): Delete commented-out
	definition.
	* config/powerpcspe/powerpcspe.h (SLOW_UNALIGNED_ACCESS): Delete.
	* config/powerpcspe/powerpcspe.c (TARGET_SLOW_UNALIGNED_ACCESS):
	Redefine.
	(rs6000_slow_unaligned_access): New function.
	(rs6000_emit_move): Use it instead of SLOW_UNALIGNED_ACCESS.
	(expand_block_compare): Likewise.
	(expand_strn_compare): Likewise.
	(rs6000_rtx_costs): Likewise.
	* config/riscv/riscv.h (SLOW_UNALIGNED_ACCESS): Delete.
	(riscv_slow_unaligned_access): Likewise.
	* config/riscv/riscv.c (riscv_slow_unaligned_access): Rename to...
	(riscv_slow_unaligned_access_p): ...this and make static.
	(riscv_option_override): Update accordingly.
	(riscv_slow_unaligned_access): New function.
	(TARGET_SLOW_UNALIGNED_ACCESS): Redefine.
	* config/rs6000/rs6000.h (SLOW_UNALIGNED_ACCESS): Delete.
	* config/rs6000/rs6000.c (TARGET_SLOW_UNALIGNED_ACCESS): Redefine.
	(rs6000_slow_unaligned_access): New function.
	(rs6000_emit_move): Use it instead of SLOW_UNALIGNED_ACCESS.
	(rs6000_rtx_costs): Likewise.
	* config/rs6000/rs6000-string.c (expand_block_compare)
	(expand_strn_compare): Use targetm.slow_unaligned_access instead
	of SLOW_UNALIGNED_ACCESS.
	* config/tilegx/tilegx.h (SLOW_UNALIGNED_ACCESS): Delete.
	* config/tilepro/tilepro.h (SLOW_UNALIGNED_ACCESS): Delete.
	* calls.c (expand_call): Use targetm.slow_unaligned_access instead
	of SLOW_UNALIGNED_ACCESS.
	* expmed.c (simple_mem_bitfield_p): Likewise.
	* expr.c (alignment_for_piecewise_move): Likewise.
	(emit_group_load_1): Likewise.
	(emit_group_store): Likewise.
	(copy_blkmode_from_reg): Likewise.
	(emit_push_insn): Likewise.
	(expand_assignment): Likewise.
	(store_field): Likewise.
	(expand_expr_real_1): Likewise.
	* gimple-fold.c (gimple_fold_builtin_memory_op): Likewise.
	* lra-constraints.c (simplify_operand_subreg): Likewise.
	* stor-layout.c (bit_field_mode_iterator::next_mode): Likewise.
	* gimple-ssa-store-merging.c: Likewise in block comment at start
	of file.
	* tree-ssa-strlen.c: Include target.h.
	(handle_builtin_memcmp): Use targetm.slow_unaligned_access instead
	of SLOW_UNALIGNED_ACCESS.
	* system.h (SLOW_UNALIGNED_ACCESS): Poison.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r252009
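For context, a minimal sketch of what a port provides after this change, using a
hypothetical backend named "foo"; the function name, its body, and the file name
are illustrative only and are not part of this patch:

    /* foo.c -- hypothetical backend, for illustration only.  */

    static bool
    foo_slow_unaligned_access (machine_mode mode, unsigned int align)
    {
      /* Report unaligned accesses as slow whenever strict alignment is in
	 effect or the access is less aligned than the mode requires.  */
      return STRICT_ALIGNMENT || align < GET_MODE_ALIGNMENT (mode);
    }

    #undef TARGET_SLOW_UNALIGNED_ACCESS
    #define TARGET_SLOW_UNALIGNED_ACCESS foo_slow_unaligned_access

Ports that do not define the hook fall back to default_slow_unaligned_access
(added in targhooks.c above), which presumably preserves the old defaults.h
behaviour of testing STRICT_ALIGNMENT.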
Diffstat (limited to 'gcc/config')
-rw-r--r--  gcc/config/alpha/alpha.h             |  6
-rw-r--r--  gcc/config/arm/arm.h                 |  2
-rw-r--r--  gcc/config/i386/i386.h               | 14
-rw-r--r--  gcc/config/powerpcspe/powerpcspe.c   | 47
-rw-r--r--  gcc/config/powerpcspe/powerpcspe.h   | 14
-rw-r--r--  gcc/config/riscv/riscv.c             | 17
-rw-r--r--  gcc/config/riscv/riscv.h             |  3
-rw-r--r--  gcc/config/rs6000/rs6000-string.c    | 13
-rw-r--r--  gcc/config/rs6000/rs6000.c           | 34
-rw-r--r--  gcc/config/rs6000/rs6000.h           | 13
-rw-r--r--  gcc/config/tilegx/tilegx.h           |  3
-rw-r--r--  gcc/config/tilepro/tilepro.h         |  3
12 files changed, 80 insertions, 89 deletions
diff --git a/gcc/config/alpha/alpha.h b/gcc/config/alpha/alpha.h
index 8bca5b3..7d81ac4 100644
--- a/gcc/config/alpha/alpha.h
+++ b/gcc/config/alpha/alpha.h
@@ -300,12 +300,6 @@ extern enum alpha_fp_trap_mode alpha_fptm;
#define STRICT_ALIGNMENT 1
-/* Set this nonzero if unaligned move instructions are extremely slow.
-
- On the Alpha, they trap. */
-
-#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 1
-
/* Standard register usage. */
/* Number of actual hardware registers.
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index 5fdb65b..bef6602 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -1917,8 +1917,6 @@ enum arm_auto_incmodes
/* Nonzero if access to memory by bytes is slow and undesirable. */
#define SLOW_BYTE_ACCESS 0
-#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 1
-
/* Immediate shift counts are truncated by the output routines (or was it
the assembler?). Shift counts in a register are truncated by ARM. Note
that the native compiler puts too large (> 32) immediate shift counts
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index 873122c..e8ed897 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -2017,20 +2017,6 @@ do { \
/* Nonzero if access to memory by shorts is slow and undesirable. */
#define SLOW_SHORT_ACCESS 0
-/* Define this macro to be the value 1 if unaligned accesses have a
- cost many times greater than aligned accesses, for example if they
- are emulated in a trap handler.
-
- When this macro is nonzero, the compiler will act as if
- `STRICT_ALIGNMENT' were nonzero when generating code for block
- moves. This can cause significantly more instructions to be
- produced. Therefore, do not set this macro nonzero if unaligned
- accesses only add a cycle or two to the time for a memory access.
-
- If the value of this macro is always zero, it need not be defined. */
-
-/* #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 0 */
-
/* Define this macro if it is as good or better to call a constant
function address than to call an address kept in a register.
diff --git a/gcc/config/powerpcspe/powerpcspe.c b/gcc/config/powerpcspe/powerpcspe.c
index b964e6e..446a8bb 100644
--- a/gcc/config/powerpcspe/powerpcspe.c
+++ b/gcc/config/powerpcspe/powerpcspe.c
@@ -1986,6 +1986,9 @@ static const struct attribute_spec rs6000_attribute_table[] =
#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
rs6000_hard_regno_call_part_clobbered
+
+#undef TARGET_SLOW_UNALIGNED_ACCESS
+#define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
/* Processor table. */
@@ -8366,6 +8369,21 @@ rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
return align;
}
+/* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
+ instructions simply ignore the low bits; SPE vector memory
+ instructions trap on unaligned accesses; VSX memory instructions are
+ aligned to 4 or 8 bytes. */
+
+static bool
+rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
+{
+ return (STRICT_ALIGNMENT
+ || (!TARGET_EFFICIENT_UNALIGNED_VSX
+ && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
+ || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
+ && (int) align < VECTOR_ALIGN (mode)))));
+}
+
/* Previous GCC releases forced all vector types to have 16-byte alignment. */
bool
@@ -11015,13 +11033,14 @@ rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
if (GET_CODE (operands[0]) == MEM
&& GET_CODE (operands[1]) == MEM
&& mode == DImode
- && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
- || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
- && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
- ? 32 : MEM_ALIGN (operands[0])))
- || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
- ? 32
- : MEM_ALIGN (operands[1]))))
+ && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
+ || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
+ && ! (rs6000_slow_unaligned_access (SImode,
+ (MEM_ALIGN (operands[0]) > 32
+ ? 32 : MEM_ALIGN (operands[0])))
+ || rs6000_slow_unaligned_access (SImode,
+ (MEM_ALIGN (operands[1]) > 32
+ ? 32 : MEM_ALIGN (operands[1]))))
&& ! MEM_VOLATILE_P (operands [0])
&& ! MEM_VOLATILE_P (operands [1]))
{
@@ -19989,9 +20008,9 @@ expand_block_compare (rtx operands[])
unsigned int base_align = UINTVAL (align_rtx) / BITS_PER_UNIT;
- /* SLOW_UNALIGNED_ACCESS -- don't do unaligned stuff. */
- if (SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (orig_src1))
- || SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (orig_src2)))
+ /* rs6000_slow_unaligned_access -- don't do unaligned stuff. */
+ if (rs6000_slow_unaligned_access (word_mode, MEM_ALIGN (orig_src1))
+ || rs6000_slow_unaligned_access (word_mode, MEM_ALIGN (orig_src2)))
return false;
gcc_assert (GET_MODE (target) == SImode);
@@ -20380,9 +20399,9 @@ expand_strn_compare (rtx operands[], int no_length)
int align1 = MEM_ALIGN (orig_src1) / BITS_PER_UNIT;
int align2 = MEM_ALIGN (orig_src2) / BITS_PER_UNIT;
- /* SLOW_UNALIGNED_ACCESS -- don't do unaligned stuff. */
- if (SLOW_UNALIGNED_ACCESS (word_mode, align1)
- || SLOW_UNALIGNED_ACCESS (word_mode, align2))
+ /* rs6000_slow_unaligned_access -- don't do unaligned stuff. */
+ if (rs6000_slow_unaligned_access (word_mode, align1)
+ || rs6000_slow_unaligned_access (word_mode, align2))
return false;
gcc_assert (GET_MODE (target) == SImode);
@@ -37439,7 +37458,7 @@ rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
than generating address, e.g., (plus (reg) (const)).
L1 cache latency is about two instructions. */
*total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
- if (SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (x)))
+ if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
*total += COSTS_N_INSNS (100);
return true;
diff --git a/gcc/config/powerpcspe/powerpcspe.h b/gcc/config/powerpcspe/powerpcspe.h
index 548e615..a3b2347 100644
--- a/gcc/config/powerpcspe/powerpcspe.h
+++ b/gcc/config/powerpcspe/powerpcspe.h
@@ -998,20 +998,6 @@ enum data_align { align_abi, align_opt, align_both };
/* Nonzero if move instructions will actually fail to work
when given unaligned data. */
#define STRICT_ALIGNMENT 0
-
-/* Define this macro to be the value 1 if unaligned accesses have a cost
- many times greater than aligned accesses, for example if they are
- emulated in a trap handler. */
-/* Altivec vector memory instructions simply ignore the low bits; SPE vector
- memory instructions trap on unaligned accesses; VSX memory instructions are
- aligned to 4 or 8 bytes. */
-#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) \
- (STRICT_ALIGNMENT \
- || (!TARGET_EFFICIENT_UNALIGNED_VSX \
- && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) && (ALIGN) < 32) \
- || ((VECTOR_MODE_P (MODE) || FLOAT128_VECTOR_P (MODE)) \
- && (int) (ALIGN) < VECTOR_ALIGN (MODE)))))
-
/* Standard register usage. */
diff --git a/gcc/config/riscv/riscv.c b/gcc/config/riscv/riscv.c
index 62472e9..0e440f7 100644
--- a/gcc/config/riscv/riscv.c
+++ b/gcc/config/riscv/riscv.c
@@ -217,7 +217,7 @@ struct riscv_cpu_info {
/* Global variables for machine-dependent things. */
/* Whether unaligned accesses execute very slowly. */
-bool riscv_slow_unaligned_access;
+static bool riscv_slow_unaligned_access_p;
/* Which tuning parameters to use. */
static const struct riscv_tune_info *tune_info;
@@ -3744,8 +3744,8 @@ riscv_option_override (void)
/* Use -mtune's setting for slow_unaligned_access, even when optimizing
for size. For architectures that trap and emulate unaligned accesses,
the performance cost is too great, even for -Os. */
- riscv_slow_unaligned_access = (cpu->tune_info->slow_unaligned_access
- || TARGET_STRICT_ALIGN);
+ riscv_slow_unaligned_access_p = (cpu->tune_info->slow_unaligned_access
+ || TARGET_STRICT_ALIGN);
/* If the user hasn't specified a branch cost, use the processor's
default. */
@@ -3966,6 +3966,14 @@ riscv_cannot_copy_insn_p (rtx_insn *insn)
return recog_memoized (insn) >= 0 && get_attr_cannot_copy (insn);
}
+/* Implement TARGET_SLOW_UNALIGNED_ACCESS. */
+
+static bool
+riscv_slow_unaligned_access (machine_mode, unsigned int)
+{
+ return riscv_slow_unaligned_access_p;
+}
+
/* Initialize the GCC target structure. */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
@@ -4102,6 +4110,9 @@ riscv_cannot_copy_insn_p (rtx_insn *insn)
#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P riscv_modes_tieable_p
+#undef TARGET_SLOW_UNALIGNED_ACCESS
+#define TARGET_SLOW_UNALIGNED_ACCESS riscv_slow_unaligned_access
+
struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-riscv.h"
diff --git a/gcc/config/riscv/riscv.h b/gcc/config/riscv/riscv.h
index c0cf8c3..d851fd8 100644
--- a/gcc/config/riscv/riscv.h
+++ b/gcc/config/riscv/riscv.h
@@ -130,8 +130,6 @@ along with GCC; see the file COPYING3. If not see
of the privileged architecture. */
#define STRICT_ALIGNMENT TARGET_STRICT_ALIGN
-#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) riscv_slow_unaligned_access
-
/* Define this if you wish to imitate the way many other C compilers
handle alignment of bitfields and the structures that contain
them.
@@ -854,7 +852,6 @@ while (0)
#ifndef USED_FOR_TARGET
extern const enum reg_class riscv_regno_to_class[];
-extern bool riscv_slow_unaligned_access;
#endif
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
diff --git a/gcc/config/rs6000/rs6000-string.c b/gcc/config/rs6000/rs6000-string.c
index fb57ab3..19463c9 100644
--- a/gcc/config/rs6000/rs6000-string.c
+++ b/gcc/config/rs6000/rs6000-string.c
@@ -32,6 +32,7 @@
#include "explow.h"
#include "expr.h"
#include "output.h"
+#include "target.h"
/* Expand a block clear operation, and return 1 if successful. Return 0
if we should let the compiler generate normal code.
@@ -338,9 +339,9 @@ expand_block_compare (rtx operands[])
unsigned int base_align = UINTVAL (align_rtx) / BITS_PER_UNIT;
- /* SLOW_UNALIGNED_ACCESS -- don't do unaligned stuff. */
- if (SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (orig_src1))
- || SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (orig_src2)))
+ /* targetm.slow_unaligned_access -- don't do unaligned stuff. */
+ if (targetm.slow_unaligned_access (word_mode, MEM_ALIGN (orig_src1))
+ || targetm.slow_unaligned_access (word_mode, MEM_ALIGN (orig_src2)))
return false;
gcc_assert (GET_MODE (target) == SImode);
@@ -729,9 +730,9 @@ expand_strn_compare (rtx operands[], int no_length)
int align1 = MEM_ALIGN (orig_src1) / BITS_PER_UNIT;
int align2 = MEM_ALIGN (orig_src2) / BITS_PER_UNIT;
- /* SLOW_UNALIGNED_ACCESS -- don't do unaligned stuff. */
- if (SLOW_UNALIGNED_ACCESS (word_mode, align1)
- || SLOW_UNALIGNED_ACCESS (word_mode, align2))
+ /* targetm.slow_unaligned_access -- don't do unaligned stuff. */
+ if (targetm.slow_unaligned_access (word_mode, align1)
+ || targetm.slow_unaligned_access (word_mode, align2))
return false;
gcc_assert (GET_MODE (target) == SImode);
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index ecdf776..2ff7e1e 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -1976,6 +1976,9 @@ static const struct attribute_spec rs6000_attribute_table[] =
#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
rs6000_hard_regno_call_part_clobbered
+
+#undef TARGET_SLOW_UNALIGNED_ACCESS
+#define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
/* Processor table. */
@@ -7902,6 +7905,20 @@ rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
return align;
}
+/* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
+ instructions simply ignore the low bits; VSX memory instructions
+ are aligned to 4 or 8 bytes. */
+
+static bool
+rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
+{
+ return (STRICT_ALIGNMENT
+ || (!TARGET_EFFICIENT_UNALIGNED_VSX
+ && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
+ || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
+ && (int) align < VECTOR_ALIGN (mode)))));
+}
+
/* Previous GCC releases forced all vector types to have 16-byte alignment. */
bool
@@ -10500,13 +10517,14 @@ rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
if (GET_CODE (operands[0]) == MEM
&& GET_CODE (operands[1]) == MEM
&& mode == DImode
- && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
- || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
- && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
- ? 32 : MEM_ALIGN (operands[0])))
- || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
- ? 32
- : MEM_ALIGN (operands[1]))))
+ && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
+ || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
+ && ! (rs6000_slow_unaligned_access (SImode,
+ (MEM_ALIGN (operands[0]) > 32
+ ? 32 : MEM_ALIGN (operands[0])))
+ || rs6000_slow_unaligned_access (SImode,
+ (MEM_ALIGN (operands[1]) > 32
+ ? 32 : MEM_ALIGN (operands[1]))))
&& ! MEM_VOLATILE_P (operands [0])
&& ! MEM_VOLATILE_P (operands [1]))
{
@@ -34252,7 +34270,7 @@ rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
than generating address, e.g., (plus (reg) (const)).
L1 cache latency is about two instructions. */
*total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
- if (SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (x)))
+ if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
*total += COSTS_N_INSNS (100);
return true;
diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h
index 20f5b6a..4e2d0bb 100644
--- a/gcc/config/rs6000/rs6000.h
+++ b/gcc/config/rs6000/rs6000.h
@@ -968,19 +968,6 @@ enum data_align { align_abi, align_opt, align_both };
/* Nonzero if move instructions will actually fail to work
when given unaligned data. */
#define STRICT_ALIGNMENT 0
-
-/* Define this macro to be the value 1 if unaligned accesses have a cost
- many times greater than aligned accesses, for example if they are
- emulated in a trap handler. */
-/* Altivec vector memory instructions simply ignore the low bits; VSX memory
- instructions are aligned to 4 or 8 bytes. */
-#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) \
- (STRICT_ALIGNMENT \
- || (!TARGET_EFFICIENT_UNALIGNED_VSX \
- && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) && (ALIGN) < 32) \
- || ((VECTOR_MODE_P (MODE) || FLOAT128_VECTOR_P (MODE)) \
- && (int) (ALIGN) < VECTOR_ALIGN (MODE)))))
-
/* Standard register usage. */
diff --git a/gcc/config/tilegx/tilegx.h b/gcc/config/tilegx/tilegx.h
index 66cbd0d..bbeefa7 100644
--- a/gcc/config/tilegx/tilegx.h
+++ b/gcc/config/tilegx/tilegx.h
@@ -94,9 +94,6 @@
#define BIGGEST_FIELD_ALIGNMENT 128
#define WIDEST_HARDWARE_FP_SIZE 64
-/* Unaligned moves trap and are very slow. */
-#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 1
-
/* Make strings word-aligned so strcpy from constants will be
faster. */
#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
diff --git a/gcc/config/tilepro/tilepro.h b/gcc/config/tilepro/tilepro.h
index 599a7d8..221f32a 100644
--- a/gcc/config/tilepro/tilepro.h
+++ b/gcc/config/tilepro/tilepro.h
@@ -58,9 +58,6 @@
#define FASTEST_ALIGNMENT 32
#define BIGGEST_FIELD_ALIGNMENT 64
-/* Unaligned moves trap and are very slow. */
-#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 1
-
/* Make strings word-aligned so strcpy from constants will be
faster. */
#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
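
For reference, target-independent code now queries the hook through targetm
rather than the (now poisoned) macro. A minimal caller-side sketch, where
`mem' stands in for some MEM rtx in the caller's context:

    /* Illustrative only; `mem' is assumed to be a MEM rtx.  */
    if (targetm.slow_unaligned_access (word_mode, MEM_ALIGN (mem)))
      {
	/* Fall back to a strictly aligned code path.  */
      }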