 gcc/ChangeLog                | 7 +++++++
 gcc/config/i386/x86-tune.def | 4 ++--
 2 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 3cdf247..6667b33 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,12 @@
 2013-11-12  H.J. Lu  <hongjiu.lu@intel.com>
 
+	PR target/59088
+	* config/i386/x86-tune.def (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL):
+	Set for m_HASWELL.
+	(X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL): Set for m_HASWELL.
+
+2013-11-12  H.J. Lu  <hongjiu.lu@intel.com>
+
 	PR target/59084
 	* config/i386/i386.c (ix86_option_override_internal): Check
 	X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL and
diff --git a/gcc/config/i386/x86-tune.def b/gcc/config/i386/x86-tune.def
index 54867d2..4c13c3a 100644
--- a/gcc/config/i386/x86-tune.def
+++ b/gcc/config/i386/x86-tune.def
@@ -318,12 +318,12 @@ DEF_TUNE (X86_TUNE_GENERAL_REGS_SSE_SPILL, "general_regs_sse_spill",
 /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL: Use movups for misaligned loads
    instead of a sequence loading registers by parts.  */
 DEF_TUNE (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL, "sse_unaligned_load_optimal",
-	  m_COREI7 | m_COREI7_AVX | m_AMDFAM10 | m_BDVER | m_BTVER | m_SLM | m_GENERIC)
+	  m_COREI7 | m_COREI7_AVX | m_HASWELL | m_AMDFAM10 | m_BDVER | m_BTVER | m_SLM | m_GENERIC)
 
 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL: Use movups for misaligned stores
    instead of a sequence loading registers by parts.  */
 DEF_TUNE (X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL, "sse_unaligned_store_optimal",
-	  m_COREI7 | m_COREI7_AVX | m_BDVER | m_SLM | m_GENERIC)
+	  m_COREI7 | m_COREI7_AVX | m_HASWELL | m_BDVER | m_SLM | m_GENERIC)
 
 /* Use packed single precision instructions where posisble.  I.e. movups
    instead of movupd.  */
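
For context (not part of the commit): these two tuning flags tell GCC that a single unaligned movups/movupd is optimal for misaligned 128-bit accesses, rather than a sequence that loads or stores the register by parts. Below is a minimal, hypothetical C example of the kind of code this affects, assuming GCC-4.9-era auto-vectorization with -O3 and Haswell tuning:

/* Hypothetical example, not from the commit.  When the vectorizer
   cannot prove that dst and src are 16-byte aligned, it emits
   misaligned vector accesses.  With X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL
   and X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL now set for m_HASWELL,
   tuning for Haswell uses single movups loads/stores for these
   accesses instead of splitting the register into parts.  */
void
scale (float *dst, const float *src, int n)
{
  int i;
  for (i = 0; i < n; i++)
    dst[i] = src[i] * 2.0f;
}

Compiling with something like gcc -O3 -mtune=haswell -S before and after this change should show the by-parts access sequence replaced by movups; the exact output depends on the GCC version and the other flags in effect.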