author     Richard Sandiford <richard.sandiford@linaro.org>   2017-09-12 13:27:55 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>           2017-09-12 13:27:55 +0000
commit     e0bd6c9f0aa67d88bbb20019362a4572fc5fac3c (patch)
tree       3ca0e90f453bcc3361eb758707d8da86a656477f /gcc/config/powerpcspe/powerpcspe.c
parent     41defab318e4b5d8b87ba2b3512b02cb49c748a9 (diff)
Turn SLOW_UNALIGNED_ACCESS into a target hook
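This change retires the SLOW_UNALIGNED_ACCESS target macro in favour of a
target hook, so target-independent code queries the target vector instead of
a tm.h macro.  At call sites the conversion follows the pattern below (a
minimal sketch: the surrounding condition and the "mem" operand are
illustrative only; the macro, hook, and targetm names are the ones listed in
this log):

  /* Before: target macro, supplied by the port or by defaults.h.  */
  if (SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (mem)))
    return false;

  /* After: hook call through the target vector.  */
  if (targetm.slow_unaligned_access (word_mode, MEM_ALIGN (mem)))
    return false;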
2017-09-12 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
gcc/
* defaults.h (SLOW_UNALIGNED_ACCESS): Delete.
* target.def (slow_unaligned_access): New hook.
* targhooks.h (default_slow_unaligned_access): Declare.
* targhooks.c (default_slow_unaligned_access): New function.
* doc/tm.texi.in (SLOW_UNALIGNED_ACCESS): Replace with...
(TARGET_SLOW_UNALIGNED_ACCESS): ...this.
* doc/tm.texi: Regenerate.
* config/alpha/alpha.h (SLOW_UNALIGNED_ACCESS): Delete.
* config/arm/arm.h (SLOW_UNALIGNED_ACCESS): Delete.
* config/i386/i386.h (SLOW_UNALIGNED_ACCESS): Delete commented-out
definition.
* config/powerpcspe/powerpcspe.h (SLOW_UNALIGNED_ACCESS): Delete.
* config/powerpcspe/powerpcspe.c (TARGET_SLOW_UNALIGNED_ACCESS):
Redefine.
(rs6000_slow_unaligned_access): New function.
(rs6000_emit_move): Use it instead of SLOW_UNALIGNED_ACCESS.
(expand_block_compare): Likewise.
(expand_strn_compare): Likewise.
(rs6000_rtx_costs): Likewise.
* config/riscv/riscv.h (SLOW_UNALIGNED_ACCESS): Delete.
(riscv_slow_unaligned_access): Likewise.
* config/riscv/riscv.c (riscv_slow_unaligned_access): Rename to...
(riscv_slow_unaligned_access_p): ...this and make static.
(riscv_option_override): Update accordingly.
(riscv_slow_unaligned_access): New function.
(TARGET_SLOW_UNALIGNED_ACCESS): Redefine.
* config/rs6000/rs6000.h (SLOW_UNALIGNED_ACCESS): Delete.
* config/rs6000/rs6000.c (TARGET_SLOW_UNALIGNED_ACCESS): Redefine.
(rs6000_slow_unaligned_access): New function.
(rs6000_emit_move): Use it instead of SLOW_UNALIGNED_ACCESS.
(rs6000_rtx_costs): Likewise.
* config/rs6000/rs6000-string.c (expand_block_compare)
(expand_strn_compare): Use targetm.slow_unaligned_access instead
of SLOW_UNALIGNED_ACCESS.
* config/tilegx/tilegx.h (SLOW_UNALIGNED_ACCESS): Delete.
* config/tilepro/tilepro.h (SLOW_UNALIGNED_ACCESS): Delete.
* calls.c (expand_call): Use targetm.slow_unaligned_access instead
of SLOW_UNALIGNED_ACCESS.
* expmed.c (simple_mem_bitfield_p): Likewise.
* expr.c (alignment_for_piecewise_move): Likewise.
(emit_group_load_1): Likewise.
(emit_group_store): Likewise.
(copy_blkmode_from_reg): Likewise.
(emit_push_insn): Likewise.
(expand_assignment): Likewise.
(store_field): Likewise.
(expand_expr_real_1): Likewise.
* gimple-fold.c (gimple_fold_builtin_memory_op): Likewise.
* lra-constraints.c (simplify_operand_subreg): Likewise.
* stor-layout.c (bit_field_mode_iterator::next_mode): Likewise.
* gimple-ssa-store-merging.c: Likewise in block comment at start
of file.
* tree-ssa-strlen.c: Include target.h.
(handle_builtin_memcmp): Use targetm.slow_unaligned_access instead
of SLOW_UNALIGNED_ACCESS.
* system.h (SLOW_UNALIGNED_ACCESS): Poison.
Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>
From-SVN: r252009
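The targhooks.c entry above provides default_slow_unaligned_access as the
hook's default.  Its body is not part of this diff; a minimal sketch,
assuming it simply preserves the long-standing defaults.h fallback of
STRICT_ALIGNMENT:

  /* Hypothetical sketch of the default hook: treat unaligned accesses as
     slow only on strict-alignment targets, matching the old default of
     SLOW_UNALIGNED_ACCESS in defaults.h.  */
  bool
  default_slow_unaligned_access (machine_mode, unsigned int)
  {
    return STRICT_ALIGNMENT;
  }

Ports that want a finer-grained answer redefine TARGET_SLOW_UNALIGNED_ACCESS
instead, as the powerpcspe changes below do with rs6000_slow_unaligned_access;
the system.h change then poisons the old macro name so any remaining use
fails to compile.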
Diffstat (limited to 'gcc/config/powerpcspe/powerpcspe.c')
-rw-r--r--  gcc/config/powerpcspe/powerpcspe.c | 47
1 file changed, 33 insertions, 14 deletions
diff --git a/gcc/config/powerpcspe/powerpcspe.c b/gcc/config/powerpcspe/powerpcspe.c
index b964e6e..446a8bb 100644
--- a/gcc/config/powerpcspe/powerpcspe.c
+++ b/gcc/config/powerpcspe/powerpcspe.c
@@ -1986,6 +1986,9 @@ static const struct attribute_spec rs6000_attribute_table[] =
 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
   rs6000_hard_regno_call_part_clobbered
+
+#undef TARGET_SLOW_UNALIGNED_ACCESS
+#define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
 
 /* Processor table.  */
@@ -8366,6 +8369,21 @@ rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
   return align;
 }
 
+/* Implement TARGET_SLOW_UNALIGNED_ACCESS.  Altivec vector memory
+   instructions simply ignore the low bits; SPE vector memory
+   instructions trap on unaligned accesses; VSX memory instructions are
+   aligned to 4 or 8 bytes.  */
+
+static bool
+rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
+{
+  return (STRICT_ALIGNMENT
+          || (!TARGET_EFFICIENT_UNALIGNED_VSX
+              && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
+                  || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
+                      && (int) align < VECTOR_ALIGN (mode)))));
+}
+
 /* Previous GCC releases forced all vector types to have 16-byte alignment.  */
 
 bool
@@ -11015,13 +11033,14 @@ rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
   if (GET_CODE (operands[0]) == MEM
       && GET_CODE (operands[1]) == MEM
       && mode == DImode
-      && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
-          || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
-      && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
-                                            ? 32 : MEM_ALIGN (operands[0])))
-            || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
-                                               ? 32
-                                               : MEM_ALIGN (operands[1]))))
+      && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
+          || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
+      && ! (rs6000_slow_unaligned_access (SImode,
+                                          (MEM_ALIGN (operands[0]) > 32
+                                           ? 32 : MEM_ALIGN (operands[0])))
+            || rs6000_slow_unaligned_access (SImode,
+                                             (MEM_ALIGN (operands[1]) > 32
+                                              ? 32 : MEM_ALIGN (operands[1]))))
       && ! MEM_VOLATILE_P (operands [0])
       && ! MEM_VOLATILE_P (operands [1]))
     {
@@ -19989,9 +20008,9 @@ expand_block_compare (rtx operands[])
   unsigned int base_align = UINTVAL (align_rtx) / BITS_PER_UNIT;
 
-  /* SLOW_UNALIGNED_ACCESS -- don't do unaligned stuff.  */
-  if (SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (orig_src1))
-      || SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (orig_src2)))
+  /* rs6000_slow_unaligned_access -- don't do unaligned stuff.  */
+  if (rs6000_slow_unaligned_access (word_mode, MEM_ALIGN (orig_src1))
+      || rs6000_slow_unaligned_access (word_mode, MEM_ALIGN (orig_src2)))
     return false;
 
   gcc_assert (GET_MODE (target) == SImode);
@@ -20380,9 +20399,9 @@ expand_strn_compare (rtx operands[], int no_length)
   int align1 = MEM_ALIGN (orig_src1) / BITS_PER_UNIT;
   int align2 = MEM_ALIGN (orig_src2) / BITS_PER_UNIT;
 
-  /* SLOW_UNALIGNED_ACCESS -- don't do unaligned stuff.  */
-  if (SLOW_UNALIGNED_ACCESS (word_mode, align1)
-      || SLOW_UNALIGNED_ACCESS (word_mode, align2))
+  /* rs6000_slow_unaligned_access -- don't do unaligned stuff.  */
+  if (rs6000_slow_unaligned_access (word_mode, align1)
+      || rs6000_slow_unaligned_access (word_mode, align2))
     return false;
 
   gcc_assert (GET_MODE (target) == SImode);
@@ -37439,7 +37458,7 @@ rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
            than generating address, e.g., (plus (reg) (const)).
            L1 cache latency is about two instructions.  */
         *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
-        if (SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (x)))
+        if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
           *total += COSTS_N_INSNS (100);
 
         return true;