-rw-r--r--   gcc/ChangeLog                  |  36
-rw-r--r--   gcc/config.gcc                 |  22
-rw-r--r--   gcc/config/i386/driver-i386.c  |  10
-rw-r--r--   gcc/config/i386/i386-c.c       |   7
-rw-r--r--   gcc/config/i386/i386.c         | 130
-rw-r--r--   gcc/config/i386/i386.h         |   3
-rw-r--r--   gcc/config/i386/i386.md        |   3
-rw-r--r--   gcc/config/i386/slm.md         | 758
-rw-r--r--   libgcc/config/i386/cpuinfo.c   |   1
9 files changed, 937 insertions, 33 deletions
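
For context on the driver-i386.c hunk in the patch below: -march=native now distinguishes Silvermont from Core i7 class parts by testing for MOVBE alongside SSE4.2 in host_detect_local_cpu. The following standalone sketch reproduces only that feature-bit heuristic, using __get_cpuid and the bit_* masks from GCC's <cpuid.h>; it is an illustration, not code from the patch, and the real driver checks many more features (AVX, SSSE3, and so on) before falling back.

/* Minimal sketch of the new heuristic: SSE4.2 plus MOVBE in CPUID.01H:ECX
   is taken to mean Silvermont ("slm"); SSE4.2 without MOVBE is assumed to
   be a Core i7 class CPU.  Illustration only.  */
#include <cpuid.h>
#include <stdio.h>

static const char *
guess_march (void)
{
  unsigned int eax, ebx, ecx, edx;

  if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
    return "generic";

  if (ecx & bit_SSE4_2)
    return (ecx & bit_MOVBE) ? "slm" : "corei7";

  return "generic";
}

int
main (void)
{
  printf ("-march=%s\n", guess_march ());
  return 0;
}
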
diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 6d540e7..8b27ecc 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,39 @@ +2013-05-30 Yuri Rumyantsev <yuri.s.rumyantsev@intel.com> + Igor Zamyatin <igor.zamyatin@intel.com> + + Silvermont (SLM) architecture pipeline model, tuning and + insn selection. + * config.gcc: Add slm config options and target. + + * config/i386/slm.md: New. + + * config/i386/driver-i386.c (host_detect_local_cpu): Check movbe. + + * gcc/config/i386/i386-c.c (ix86_target_macros_internal): New case + PROCESSOR_SLM. + (ix86_target_macros_internal): Likewise. + + * gcc/config/i386/i386.c (slm_cost): New cost. + (m_SLM): New macro flag. + (initial_ix86_tune_features): Set m_SLM. + (x86_accumulate_outgoing_args): Likewise. + (x86_arch_always_fancy_math_387): Likewise. + (processor_target_table): Add slm cost. + (cpu_names): Add slm cpu name. + (x86_option_override_internal): Set SLM ISA. + (ix86_issue_rate): New case PROCESSOR_SLM. + (ia32_multipass_dfa_lookahead): Likewise. + (fold_builtin_cpu): Add slm. + + * config/i386/i386.h (TARGET_SLM): New target macro. + (target_cpu_default): Add TARGET_CPU_DEFAULT_slm. + (processor_type): Add PROCESSOR_SLM. + + * config/i386/i386.md (cpu): Add new value "slm". + (slm.md): Include slm.md. + + * libgcc/config/i386/cpuinfo.c (INTEL_SLM): New enum value. + 2013-05-30 Bernd Schmidt <bernds@codesourcery.com> Zhenqiang Chen <zhenqiang.chen@linaro.org> diff --git a/gcc/config.gcc b/gcc/config.gcc index ef75fa5..a3dc2a9 100644 --- a/gcc/config.gcc +++ b/gcc/config.gcc @@ -1293,7 +1293,7 @@ i[34567]86-*-linux* | i[34567]86-*-kfreebsd*-gnu | i[34567]86-*-knetbsd*-gnu | i TM_MULTILIB_CONFIG=`echo $TM_MULTILIB_CONFIG | sed 's/^,//'` need_64bit_isa=yes case X"${with_cpu}" in - Xgeneric|Xatom|Xcore2|Xcorei7|Xcorei7-avx|Xnocona|Xx86-64|Xbdver3|Xbdver2|Xbdver1|Xbtver2|Xbtver1|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3) + Xgeneric|Xatom|Xslm|Xcore2|Xcorei7|Xcorei7-avx|Xnocona|Xx86-64|Xbdver3|Xbdver2|Xbdver1|Xbtver2|Xbtver1|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3) ;; X) if test x$with_cpu_64 = x; then @@ -1302,7 +1302,7 @@ i[34567]86-*-linux* | i[34567]86-*-kfreebsd*-gnu | i[34567]86-*-knetbsd*-gnu | i ;; *) echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2 - echo "generic atom core2 corei7 corei7-avx nocona x86-64 bdver3 bdver2 bdver1 btver2 btver1 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2 + echo "generic atom slm core2 corei7 corei7-avx nocona x86-64 bdver3 bdver2 bdver1 btver2 btver1 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2 exit 1 ;; esac @@ -1414,7 +1414,7 @@ i[34567]86-*-solaris2* | x86_64-*-solaris2.1[0-9]*) tmake_file="$tmake_file i386/t-sol2-64" need_64bit_isa=yes case X"${with_cpu}" in - Xgeneric|Xatom|Xcore2|Xcorei7|Xcorei7-avx|Xnocona|Xx86-64|Xbdver3|Xbdver2|Xbdver1|Xbtver2|Xbtver1|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3) + Xgeneric|Xatom|Xslm|Xcore2|Xcorei7|Xcorei7-avx|Xnocona|Xx86-64|Xbdver3|Xbdver2|Xbdver1|Xbtver2|Xbtver1|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3) ;; X) if test x$with_cpu_64 = x; then @@ -1423,7 +1423,7 @@ i[34567]86-*-solaris2* | x86_64-*-solaris2.1[0-9]*) ;; *) echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2 - echo "generic atom core2 corei7 corei7-avx 
nocona x86-64 bdver3 bdver2 bdver1 btver2 btver1 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2 + echo "generic atom slm core2 corei7 corei7-avx nocona x86-64 bdver3 bdver2 bdver1 btver2 btver1 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2 exit 1 ;; esac @@ -1496,7 +1496,7 @@ i[34567]86-*-mingw* | x86_64-*-mingw*) if test x$enable_targets = xall; then tm_defines="${tm_defines} TARGET_BI_ARCH=1" case X"${with_cpu}" in - Xgeneric|Xatom|Xcore2|Xcorei7|Xcorei7-avx|Xnocona|Xx86-64|Xbdver3|Xbdver2|Xbdver1|Xbtver2|Xbtver1|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3) + Xgeneric|Xatom|Xslm|Xcore2|Xcorei7|Xcorei7-avx|Xnocona|Xx86-64|Xbdver3|Xbdver2|Xbdver1|Xbtver2|Xbtver1|Xamdfam10|Xbarcelona|Xk8|Xopteron|Xathlon64|Xathlon-fx|Xathlon64-sse3|Xk8-sse3|Xopteron-sse3) ;; X) if test x$with_cpu_64 = x; then @@ -1505,7 +1505,7 @@ i[34567]86-*-mingw* | x86_64-*-mingw*) ;; *) echo "Unsupported CPU used in --with-cpu=$with_cpu, supported values:" 1>&2 - echo "generic atom core2 corei7 Xcorei7-avx nocona x86-64 bdver3 bdver2 bdver1 btver2 btver1 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2 + echo "generic atom slm core2 corei7 Xcorei7-avx nocona x86-64 bdver3 bdver2 bdver1 btver2 btver1 amdfam10 barcelona k8 opteron athlon64 athlon-fx athlon64-sse3 k8-sse3 opteron-sse3" 1>&2 exit 1 ;; esac @@ -2848,6 +2848,10 @@ case ${target} in arch=atom cpu=atom ;; + slm-*) + arch=slm + cpu=slm + ;; core2-*) arch=core2 cpu=core2 @@ -2919,6 +2923,10 @@ case ${target} in arch=atom cpu=atom ;; + slm-*) + arch=slm + cpu=slm + ;; core2-*) arch=core2 cpu=core2 @@ -3422,7 +3430,7 @@ case "${target}" in | k8 | k8-sse3 | athlon64 | athlon64-sse3 | opteron \ | opteron-sse3 | athlon-fx | bdver3 | bdver2 | bdver1 | btver2 \ | btver1 | amdfam10 | barcelona | nocona | core2 | corei7 \ - | corei7-avx | core-avx-i | core-avx2 | atom) + | corei7-avx | core-avx-i | core-avx2 | atom | slm) # OK ;; *) diff --git a/gcc/config/i386/driver-i386.c b/gcc/config/i386/driver-i386.c index e28f098..249c4cd 100644 --- a/gcc/config/i386/driver-i386.c +++ b/gcc/config/i386/driver-i386.c @@ -674,8 +674,14 @@ const char *host_detect_local_cpu (int argc, const char **argv) /* Assume Sandy Bridge. */ cpu = "corei7-avx"; else if (has_sse4_2) - /* Assume Core i7. */ - cpu = "corei7"; + { + if (has_movbe) + /* Assume SLM. */ + cpu = "slm"; + else + /* Assume Core i7. */ + cpu = "corei7"; + } else if (has_ssse3) { if (has_movbe) diff --git a/gcc/config/i386/i386-c.c b/gcc/config/i386/i386-c.c index 51fec84..ef1e1eb 100644 --- a/gcc/config/i386/i386-c.c +++ b/gcc/config/i386/i386-c.c @@ -149,6 +149,10 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag, def_or_undef (parse_in, "__atom"); def_or_undef (parse_in, "__atom__"); break; + case PROCESSOR_SLM: + def_or_undef (parse_in, "__slm"); + def_or_undef (parse_in, "__slm__"); + break; /* use PROCESSOR_max to not set/unset the arch macro. 
*/ case PROCESSOR_max: break; @@ -241,6 +245,9 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag, case PROCESSOR_ATOM: def_or_undef (parse_in, "__tune_atom__"); break; + case PROCESSOR_SLM: + def_or_undef (parse_in, "__tune_slm__"); + break; case PROCESSOR_GENERIC32: case PROCESSOR_GENERIC64: break; diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c index 9d05a7b..c37108b 100644 --- a/gcc/config/i386/i386.c +++ b/gcc/config/i386/i386.c @@ -1482,6 +1482,79 @@ struct processor_costs atom_cost = { 1, /* cond_not_taken_branch_cost. */ }; +static const +struct processor_costs slm_cost = { + COSTS_N_INSNS (1), /* cost of an add instruction */ + COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */ + COSTS_N_INSNS (1), /* variable shift costs */ + COSTS_N_INSNS (1), /* constant shift costs */ + {COSTS_N_INSNS (3), /* cost of starting multiply for QI */ + COSTS_N_INSNS (4), /* HI */ + COSTS_N_INSNS (3), /* SI */ + COSTS_N_INSNS (4), /* DI */ + COSTS_N_INSNS (2)}, /* other */ + 0, /* cost of multiply per each bit set */ + {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */ + COSTS_N_INSNS (26), /* HI */ + COSTS_N_INSNS (42), /* SI */ + COSTS_N_INSNS (74), /* DI */ + COSTS_N_INSNS (74)}, /* other */ + COSTS_N_INSNS (1), /* cost of movsx */ + COSTS_N_INSNS (1), /* cost of movzx */ + 8, /* "large" insn */ + 17, /* MOVE_RATIO */ + 4, /* cost for loading QImode using movzbl */ + {4, 4, 4}, /* cost of loading integer registers + in QImode, HImode and SImode. + Relative to reg-reg move (2). */ + {4, 4, 4}, /* cost of storing integer registers */ + 4, /* cost of reg,reg fld/fst */ + {12, 12, 12}, /* cost of loading fp registers + in SFmode, DFmode and XFmode */ + {6, 6, 8}, /* cost of storing fp registers + in SFmode, DFmode and XFmode */ + 2, /* cost of moving MMX register */ + {8, 8}, /* cost of loading MMX registers + in SImode and DImode */ + {8, 8}, /* cost of storing MMX registers + in SImode and DImode */ + 2, /* cost of moving SSE register */ + {8, 8, 8}, /* cost of loading SSE registers + in SImode, DImode and TImode */ + {8, 8, 8}, /* cost of storing SSE registers + in SImode, DImode and TImode */ + 5, /* MMX or SSE register to integer */ + 32, /* size of l1 cache. */ + 256, /* size of l2 cache. */ + 64, /* size of prefetch block */ + 6, /* number of parallel prefetches */ + 3, /* Branch cost */ + COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */ + COSTS_N_INSNS (8), /* cost of FMUL instruction. */ + COSTS_N_INSNS (20), /* cost of FDIV instruction. */ + COSTS_N_INSNS (8), /* cost of FABS instruction. */ + COSTS_N_INSNS (8), /* cost of FCHS instruction. */ + COSTS_N_INSNS (40), /* cost of FSQRT instruction. */ + {{libcall, {{11, loop, false}, {-1, rep_prefix_4_byte, false}}}, + {libcall, {{32, loop, false}, {64, rep_prefix_4_byte, false}, + {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}}, + {{libcall, {{8, loop, false}, {15, unrolled_loop, false}, + {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}}, + {libcall, {{24, loop, false}, {32, unrolled_loop, false}, + {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}}, + 1, /* scalar_stmt_cost. */ + 1, /* scalar load_cost. */ + 1, /* scalar_store_cost. */ + 1, /* vec_stmt_cost. */ + 1, /* vec_to_scalar_cost. */ + 1, /* scalar_to_vec_cost. */ + 1, /* vec_align_load_cost. */ + 2, /* vec_unalign_load_cost. */ + 1, /* vec_store_cost. */ + 3, /* cond_taken_branch_cost. */ + 1, /* cond_not_taken_branch_cost. */ +}; + /* Generic64 should produce code tuned for Nocona and K8. 
*/ static const struct processor_costs generic64_cost = { @@ -1735,6 +1808,7 @@ const struct processor_costs *ix86_cost = &pentium_cost; #define m_HASWELL (1<<PROCESSOR_HASWELL) #define m_CORE_ALL (m_CORE2 | m_COREI7 | m_HASWELL) #define m_ATOM (1<<PROCESSOR_ATOM) +#define m_SLM (1<<PROCESSOR_SLM) #define m_GEODE (1<<PROCESSOR_GEODE) #define m_K6 (1<<PROCESSOR_K6) @@ -1778,7 +1852,7 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = { m_486 | m_PENT, /* X86_TUNE_UNROLL_STRLEN */ - m_486 | m_PENT | m_PPRO | m_ATOM | m_CORE_ALL | m_K6 | m_AMD_MULTIPLE | m_GENERIC, + m_486 | m_PENT | m_PPRO | m_ATOM | m_SLM | m_CORE_ALL | m_K6 | m_AMD_MULTIPLE | m_GENERIC, /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based on simulation result. But after P4 was made, no performance benefit @@ -1790,11 +1864,11 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = { ~m_386, /* X86_TUNE_USE_SAHF */ - m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER | m_GENERIC, + m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER | m_GENERIC, /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid partial dependencies. */ - m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_GEODE | m_AMD_MULTIPLE | m_GENERIC, + m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_GEODE | m_AMD_MULTIPLE | m_GENERIC, /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial register stalls on Generic32 compilation setting as well. However @@ -1817,13 +1891,13 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = { m_386 | m_486 | m_K6_GEODE, /* X86_TUNE_USE_SIMODE_FIOP */ - ~(m_PENT | m_PPRO | m_CORE_ALL | m_ATOM | m_AMD_MULTIPLE | m_GENERIC), + ~(m_PENT | m_PPRO | m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC), /* X86_TUNE_USE_MOV0 */ m_K6, /* X86_TUNE_USE_CLTD */ - ~(m_PENT | m_ATOM | m_K6), + ~(m_PENT | m_ATOM | m_SLM | m_K6), /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */ m_PENT4, @@ -1838,7 +1912,7 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = { ~(m_PENT | m_PPRO), /* X86_TUNE_PROMOTE_QIMODE */ - m_386 | m_486 | m_PENT | m_CORE_ALL | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC, + m_386 | m_486 | m_PENT | m_CORE_ALL | m_ATOM | m_SLM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC, /* X86_TUNE_FAST_PREFIX */ ~(m_386 | m_486 | m_PENT), @@ -1879,10 +1953,10 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = { /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred for DFmode copies */ - ~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_GEODE | m_AMD_MULTIPLE | m_ATOM | m_GENERIC), + ~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_GEODE | m_AMD_MULTIPLE | m_GENERIC), /* X86_TUNE_PARTIAL_REG_DEPENDENCY */ - m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_AMD_MULTIPLE | m_GENERIC, + m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC, /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a conflict here in between PPro/Pentium4 based chips that thread 128bit @@ -1893,13 +1967,13 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = { shows that disabling this option on P4 brings over 20% SPECfp regression, while enabling it on K8 brings roughly 2.4% regression that can be partly masked by careful scheduling of moves. 
*/ - m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_AMDFAM10 | m_BDVER | m_GENERIC, + m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_AMDFAM10 | m_BDVER | m_GENERIC, /* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */ - m_COREI7 | m_AMDFAM10 | m_BDVER | m_BTVER, + m_COREI7 | m_AMDFAM10 | m_BDVER | m_BTVER | m_SLM, /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */ - m_COREI7 | m_BDVER, + m_COREI7 | m_BDVER | m_SLM, /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */ m_BDVER , @@ -1917,7 +1991,7 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = { m_PPRO | m_P4_NOCONA, /* X86_TUNE_MEMORY_MISMATCH_STALL */ - m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_AMD_MULTIPLE | m_GENERIC, + m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC, /* X86_TUNE_PROLOGUE_USING_MOVE */ m_PPRO | m_ATHLON_K8, @@ -1942,16 +2016,16 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = { /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more than 4 branch instructions in the 16 byte window. */ - m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_AMD_MULTIPLE | m_GENERIC, + m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC, /* X86_TUNE_SCHEDULE */ - m_PENT | m_PPRO | m_CORE_ALL | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC, + m_PENT | m_PPRO | m_CORE_ALL | m_ATOM | m_SLM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC, /* X86_TUNE_USE_BT */ - m_CORE_ALL | m_ATOM | m_AMD_MULTIPLE | m_GENERIC, + m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC, /* X86_TUNE_USE_INCDEC */ - ~(m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_GENERIC), + ~(m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_GENERIC), /* X86_TUNE_PAD_RETURNS */ m_CORE_ALL | m_AMD_MULTIPLE | m_GENERIC, @@ -1960,7 +2034,7 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = { m_ATOM, /* X86_TUNE_EXT_80387_CONSTANTS */ - m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_K6_GEODE | m_ATHLON_K8 | m_GENERIC, + m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_K6_GEODE | m_ATHLON_K8 | m_GENERIC, /* X86_TUNE_AVOID_VECTOR_DECODE */ m_CORE_ALL | m_K8 | m_GENERIC64, @@ -2005,7 +2079,7 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = { /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag will impact LEA instruction selection. */ - m_ATOM, + m_ATOM | m_SLM, /* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector instructions. */ @@ -2026,7 +2100,7 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = { /* X86_TUNE_REASSOC_FP_TO_PARALLEL: Try to produce parallel computations during reassociation of fp computation. */ - m_ATOM | m_HASWELL | m_BDVER1 | m_BDVER2, + m_ATOM | m_SLM | m_HASWELL | m_BDVER1 | m_BDVER2, /* X86_TUNE_GENERAL_REGS_SSE_SPILL: Try to spill general regs to SSE regs instead of memory. 
*/ @@ -2060,10 +2134,10 @@ static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = { }; static const unsigned int x86_accumulate_outgoing_args - = m_PPRO | m_P4_NOCONA | m_ATOM | m_CORE_ALL | m_AMD_MULTIPLE | m_GENERIC; + = m_PPRO | m_P4_NOCONA | m_ATOM | m_SLM | m_CORE_ALL | m_AMD_MULTIPLE | m_GENERIC; static const unsigned int x86_arch_always_fancy_math_387 - = m_PENT | m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_AMD_MULTIPLE | m_GENERIC; + = m_PENT | m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC; static const unsigned int x86_avx256_split_unaligned_load = m_COREI7 | m_GENERIC; @@ -2458,7 +2532,8 @@ static const struct ptt processor_target_table[PROCESSOR_max] = {&bdver3_cost, 16, 10, 16, 7, 11}, {&btver1_cost, 16, 10, 16, 7, 11}, {&btver2_cost, 16, 10, 16, 7, 11}, - {&atom_cost, 16, 15, 16, 7, 16} + {&atom_cost, 16, 15, 16, 7, 16}, + {&slm_cost, 16, 15, 16, 7, 16} }; static const char *const cpu_names[TARGET_CPU_DEFAULT_max] = @@ -2479,6 +2554,7 @@ static const char *const cpu_names[TARGET_CPU_DEFAULT_max] = "corei7", "core-avx2", "atom", + "slm", "geode", "k6", "k6-2", @@ -2940,6 +3016,10 @@ ix86_option_override_internal (bool main_args_p) {"atom", PROCESSOR_ATOM, CPU_ATOM, PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE | PTA_FXSR}, + {"slm", PROCESSOR_SLM, CPU_SLM, + PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3 + | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_CX16 | PTA_MOVBE + | PTA_FXSR}, {"geode", PROCESSOR_GEODE, CPU_GEODE, PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE | PTA_PRFCHW}, {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX}, @@ -24199,6 +24279,7 @@ ix86_issue_rate (void) { case PROCESSOR_PENTIUM: case PROCESSOR_ATOM: + case PROCESSOR_SLM: case PROCESSOR_K6: case PROCESSOR_BTVER2: return 2; @@ -24466,6 +24547,7 @@ ia32_multipass_dfa_lookahead (void) case PROCESSOR_COREI7: case PROCESSOR_HASWELL: case PROCESSOR_ATOM: + case PROCESSOR_SLM: /* Generally, we want haifa-sched:max_issue() to look ahead as far as many instructions can be executed on a cycle, i.e., issue_rate. I wonder why tuning for many CPUs does not do this. */ @@ -24566,7 +24648,7 @@ ix86_sched_reorder(FILE *dump, int sched_verbose, rtx *ready, int *pn_ready, continue; if (pro != insn) index = -1; - } + } if (index >= 0) break; } @@ -29752,6 +29834,7 @@ fold_builtin_cpu (tree fndecl, tree *args) M_AMD, M_CPU_TYPE_START, M_INTEL_ATOM, + M_INTEL_SLM, M_INTEL_CORE2, M_INTEL_COREI7, M_AMDFAM10H, @@ -29778,6 +29861,7 @@ fold_builtin_cpu (tree fndecl, tree *args) {"amd", M_AMD}, {"intel", M_INTEL}, {"atom", M_INTEL_ATOM}, + {"slm", M_INTEL_SLM}, {"core2", M_INTEL_CORE2}, {"corei7", M_INTEL_COREI7}, {"nehalem", M_INTEL_COREI7_NEHALEM}, diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h index 6055b99..776582a 100644 --- a/gcc/config/i386/i386.h +++ b/gcc/config/i386/i386.h @@ -257,6 +257,7 @@ extern const struct processor_costs ix86_size_cost; #define TARGET_BTVER1 (ix86_tune == PROCESSOR_BTVER1) #define TARGET_BTVER2 (ix86_tune == PROCESSOR_BTVER2) #define TARGET_ATOM (ix86_tune == PROCESSOR_ATOM) +#define TARGET_SLM (ix86_tune == PROCESSOR_SLM) /* Feature tests against the various tunings. 
*/ enum ix86_tune_indices { @@ -623,6 +624,7 @@ enum target_cpu_default TARGET_CPU_DEFAULT_corei7, TARGET_CPU_DEFAULT_haswell, TARGET_CPU_DEFAULT_atom, + TARGET_CPU_DEFAULT_slm, TARGET_CPU_DEFAULT_geode, TARGET_CPU_DEFAULT_k6, @@ -2131,6 +2133,7 @@ enum processor_type PROCESSOR_BTVER1, PROCESSOR_BTVER2, PROCESSOR_ATOM, + PROCESSOR_SLM, PROCESSOR_max }; diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md index ce77f15..28b0c78 100644 --- a/gcc/config/i386/i386.md +++ b/gcc/config/i386/i386.md @@ -323,7 +323,7 @@ ;; Processor type. (define_attr "cpu" "none,pentium,pentiumpro,geode,k6,athlon,k8,core2,corei7, - atom,generic64,amdfam10,bdver1,bdver2,bdver3,btver1,btver2" + atom,slm,generic64,amdfam10,bdver1,bdver2,bdver3,btver1,btver2" (const (symbol_ref "ix86_schedule"))) ;; A basic instruction type. Refinements due to arguments to be @@ -964,6 +964,7 @@ (include "btver2.md") (include "geode.md") (include "atom.md") +(include "slm.md") (include "core2.md") diff --git a/gcc/config/i386/slm.md b/gcc/config/i386/slm.md new file mode 100644 index 0000000..3ac919e --- /dev/null +++ b/gcc/config/i386/slm.md @@ -0,0 +1,758 @@ +;; Slivermont(SLM) Scheduling +;; Copyright (C) 2009, 2010 Free Software Foundation, Inc. +;; +;; This file is part of GCC. +;; +;; GCC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 3, or (at your option) +;; any later version. +;; +;; GCC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. +;; +;; You should have received a copy of the GNU General Public License +;; along with GCC; see the file COPYING3. If not see +;; <http://www.gnu.org/licenses/>. +;; +;; Silvermont has 2 out-of-order IEC, 2 in-order FEC and 1 in-order MEC. + + +(define_automaton "slm") + +;; EU: Execution Unit +;; Silvermont EUs are connected by port 0 or port 1. 
+ +;; SLM has two ports: port 0 and port 1 connecting to all execution units +(define_cpu_unit "slm-port-0,slm-port-1" "slm") + +(define_cpu_unit "slm-ieu-0, slm-ieu-1, + slm-imul, slm-feu-0, slm-feu-1" + "slm") + +(define_reservation "slm-all-ieu" "(slm-ieu-0 + slm-ieu-1 + slm-imul)") +(define_reservation "slm-all-feu" "(slm-feu-0 + slm-feu-1)") +(define_reservation "slm-all-eu" "(slm-all-ieu + slm-all-feu)") +(define_reservation "slm-fp-0" "(slm-port-0 + slm-feu-0)") + +;; Some EUs have duplicated copied and can be accessed via either +;; port 0 or port 1 +;; (define_reservation "slm-port-either" "(slm-port-0 | slm-port-1)" +(define_reservation "slm-port-dual" "(slm-port-0 + slm-port-1)") + +;;; fmul insn can have 4 or 5 cycles latency +(define_reservation "slm-fmul-5c" + "(slm-port-0 + slm-feu-0), slm-feu-0, nothing*3") +(define_reservation "slm-fmul-4c" "(slm-port-0 + slm-feu-0), nothing*3") + +;;; fadd can has 3 cycles latency depends on instruction forms +(define_reservation "slm-fadd-3c" "(slm-port-1 + slm-feu-1), nothing*2") +(define_reservation "slm-fadd-4c" + "(slm-port-1 + slm-feu-1), slm-feu-1, nothing*2") + +;;; imul insn has 3 cycles latency for SI operands +(define_reservation "slm-imul-32" + "(slm-port-1 + slm-imul), nothing*2") +(define_reservation "slm-imul-mem-32" + "(slm-port-1 + slm-imul + slm-port-0), nothing*2") +;;; imul has 4 cycles latency for DI operands with 1/2 tput +(define_reservation "slm-imul-64" + "(slm-port-1 + slm-imul), slm-imul, nothing*2") + +;;; dual-execution instructions can have 1,2,4,5 cycles latency depends on +;;; instruction forms +(define_reservation "slm-dual-1c" "(slm-port-dual + slm-all-eu)") +(define_reservation "slm-dual-2c" + "(slm-port-dual + slm-all-eu, nothing)") + +;;; Most of simple ALU instructions have 1 cycle latency. Some of them +;;; issue in port 0, some in port 0 and some in either port. +(define_reservation "slm-simple-0" "(slm-port-0 + slm-ieu-0)") +(define_reservation "slm-simple-1" "(slm-port-1 + slm-ieu-1)") +(define_reservation "slm-simple-either" "(slm-simple-0 | slm-simple-1)") + +;;; Complex macro-instruction has variants of latency, and uses both ports. +(define_reservation "slm-complex" "(slm-port-dual + slm-all-eu)") + +(define_insn_reservation "slm_other" 9 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "other") + (eq_attr "atom_unit" "!jeu"))) + "slm-complex, slm-all-eu*8") + +;; return has type "other" with atom_unit "jeu" +(define_insn_reservation "slm_other_2" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "other") + (eq_attr "atom_unit" "jeu"))) + "slm-dual-1c") + +(define_insn_reservation "slm_multi" 9 + (and (eq_attr "cpu" "slm") + (eq_attr "type" "multi")) + "slm-complex, slm-all-eu*8") + +;; Normal alu insns without carry +(define_insn_reservation "slm_alu" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "alu") + (and (eq_attr "memory" "none") + (eq_attr "use_carry" "0")))) + "slm-simple-either") + +;; Normal alu insns without carry, but use MEC. 
+(define_insn_reservation "slm_alu_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "alu") + (and (eq_attr "memory" "!none") + (eq_attr "use_carry" "0")))) + "slm-simple-either") + +;; Alu insn consuming CF, such as add/sbb +(define_insn_reservation "slm_alu_carry" 2 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "alu") + (and (eq_attr "memory" "none") + (eq_attr "use_carry" "1")))) + "slm-simple-either, nothing") + +;; Alu insn consuming CF, such as add/sbb +(define_insn_reservation "slm_alu_carry_mem" 2 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "alu") + (and (eq_attr "memory" "!none") + (eq_attr "use_carry" "1")))) + "slm-simple-either, nothing") + +(define_insn_reservation "slm_alu1" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "alu1") + (eq_attr "memory" "none") (eq_attr "prefix_0f" "0"))) + "slm-simple-either") + +;; bsf and bsf insn +(define_insn_reservation "slm_alu1_1" 10 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "alu1") + (eq_attr "memory" "none") (eq_attr "prefix_0f" "1"))) + "slm-simple-1, slm-ieu-1*9") + +(define_insn_reservation "slm_alu1_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "alu1") + (eq_attr "memory" "!none"))) + "slm-simple-either") + +(define_insn_reservation "slm_negnot" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "negnot") + (eq_attr "memory" "none"))) + "slm-simple-either") + +(define_insn_reservation "slm_negnot_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "negnot") + (eq_attr "memory" "!none"))) + "slm-simple-either") + +(define_insn_reservation "slm_imov" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "imov") + (eq_attr "memory" "none"))) + "slm-simple-either") + +(define_insn_reservation "slm_imov_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "imov") + (eq_attr "memory" "!none"))) + "slm-simple-0") + +;; 16<-16, 32<-32 +(define_insn_reservation "slm_imovx" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "imovx") + (and (eq_attr "memory" "none") + (ior (and (match_operand:HI 0 "register_operand") + (match_operand:HI 1 "general_operand")) + (and (match_operand:SI 0 "register_operand") + (match_operand:SI 1 "general_operand")))))) + "slm-simple-either") + +;; 16<-16, 32<-32, mem +(define_insn_reservation "slm_imovx_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "imovx") + (and (eq_attr "memory" "!none") + (ior (and (match_operand:HI 0 "register_operand") + (match_operand:HI 1 "general_operand")) + (and (match_operand:SI 0 "register_operand") + (match_operand:SI 1 "general_operand")))))) + "slm-simple-either") + +;; 32<-16, 32<-8, 64<-16, 64<-8, 64<-32, 8<-8 +(define_insn_reservation "slm_imovx_2" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "imovx") + (and (eq_attr "memory" "none") + (ior (match_operand:QI 0 "register_operand") + (ior (and (match_operand:SI 0 "register_operand") + (not (match_operand:SI 1 "general_operand"))) + (match_operand:DI 0 "register_operand")))))) + "slm-simple-either") + +;; 32<-16, 32<-8, 64<-16, 64<-8, 64<-32, 8<-8, mem +(define_insn_reservation "slm_imovx_2_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "imovx") + (and (eq_attr "memory" "!none") + (ior (match_operand:QI 0 "register_operand") + (ior (and (match_operand:SI 0 "register_operand") + (not (match_operand:SI 1 "general_operand"))) + (match_operand:DI 0 "register_operand")))))) + "slm-simple-0") + +;; 16<-8 +(define_insn_reservation "slm_imovx_3" 3 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "imovx") + (and (match_operand:HI 0 
"register_operand") + (match_operand:QI 1 "general_operand")))) + "slm-simple-0, nothing*2") + +(define_insn_reservation "slm_lea" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "lea") + (eq_attr "mode" "!HI"))) + "slm-simple-either") + +;; lea 16bit address is complex insn +(define_insn_reservation "slm_lea_2" 2 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "lea") + (eq_attr "mode" "HI"))) + "slm-complex, slm-all-eu") + +(define_insn_reservation "slm_incdec" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "incdec") + (eq_attr "memory" "none"))) + "slm-simple-0") + +(define_insn_reservation "slm_incdec_mem" 3 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "incdec") + (eq_attr "memory" "!none"))) + "slm-simple-0, nothing*2") + +;; simple shift instruction use SHIFT eu, none memory +(define_insn_reservation "slm_ishift" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ishift") + (and (eq_attr "memory" "none") (eq_attr "prefix_0f" "0")))) + "slm-simple-0") + +;; simple shift instruction use SHIFT eu, memory +(define_insn_reservation "slm_ishift_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ishift") + (and (eq_attr "memory" "!none") (eq_attr "prefix_0f" "0")))) + "slm-simple-0") + +;; DF shift (prefixed with 0f) is complex insn with latency of 4 cycles +(define_insn_reservation "slm_ishift_3" 4 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ishift") + (eq_attr "prefix_0f" "1"))) + "slm-complex, slm-all-eu*3") + +(define_insn_reservation "slm_ishift1" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ishift1") + (eq_attr "memory" "none"))) + "slm-simple-0") + +(define_insn_reservation "slm_ishift1_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ishift1") + (eq_attr "memory" "!none"))) + "slm-simple-0") + +(define_insn_reservation "slm_rotate" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "rotate") + (eq_attr "memory" "none"))) + "slm-simple-0") + +(define_insn_reservation "slm_rotate_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "rotate") + (eq_attr "memory" "!none"))) + "slm-simple-0") + +(define_insn_reservation "slm_rotate1" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "rotate1") + (eq_attr "memory" "none"))) + "slm-simple-0") + +(define_insn_reservation "slm_rotate1_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "rotate1") + (eq_attr "memory" "!none"))) + "slm-simple-0") + +(define_insn_reservation "slm_imul" 3 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "imul") + (and (eq_attr "memory" "none") (eq_attr "mode" "SI")))) + "slm-imul-32") + +(define_insn_reservation "slm_imul_mem" 3 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "imul") + (and (eq_attr "memory" "!none") (eq_attr "mode" "SI")))) + "slm-imul-mem-32") + +;; latency set to 4 as common 64x64 imul with 1/2 tput +(define_insn_reservation "slm_imul_3" 4 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "imul") + (eq_attr "mode" "!SI"))) + "slm-imul-64") + +(define_insn_reservation "slm_idiv" 33 + (and (eq_attr "cpu" "slm") + (eq_attr "type" "idiv")) + "slm-complex, slm-all-eu*16, nothing*16") + +(define_insn_reservation "slm_icmp" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "icmp") + (eq_attr "memory" "none"))) + "slm-simple-either") + +(define_insn_reservation "slm_icmp_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "icmp") + (eq_attr "memory" "!none"))) + "slm-simple-either") + +(define_insn_reservation "slm_test" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "test") + (eq_attr 
"memory" "none"))) + "slm-simple-either") + +(define_insn_reservation "slm_test_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "test") + (eq_attr "memory" "!none"))) + "slm-simple-either") + +(define_insn_reservation "slm_ibr" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ibr") + (eq_attr "memory" "!load"))) + "slm-simple-1") + +;; complex if jump target is from address +(define_insn_reservation "slm_ibr_2" 2 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ibr") + (eq_attr "memory" "load"))) + "slm-complex, slm-all-eu") + +(define_insn_reservation "slm_setcc" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "setcc") + (eq_attr "memory" "!store"))) + "slm-simple-either") + +;; 2 cycles complex if target is in memory +(define_insn_reservation "slm_setcc_2" 2 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "setcc") + (eq_attr "memory" "store"))) + "slm-complex, slm-all-eu") + +(define_insn_reservation "slm_icmov" 2 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "icmov") + (eq_attr "memory" "none"))) + "slm-simple-either, nothing") + +(define_insn_reservation "slm_icmov_mem" 2 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "icmov") + (eq_attr "memory" "!none"))) + "slm-simple-0, nothing") + +;; UCODE if segreg, ignored +(define_insn_reservation "slm_push" 2 + (and (eq_attr "cpu" "slm") + (eq_attr "type" "push")) + "slm-dual-2c") + +;; pop r64 is 1 cycle. UCODE if segreg, ignored +(define_insn_reservation "slm_pop" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "pop") + (eq_attr "mode" "DI"))) + "slm-dual-1c") + +;; pop non-r64 is 2 cycles. UCODE if segreg, ignored +(define_insn_reservation "slm_pop_2" 2 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "pop") + (eq_attr "mode" "!DI"))) + "slm-dual-2c") + +;; UCODE if segreg, ignored +(define_insn_reservation "slm_call" 1 + (and (eq_attr "cpu" "slm") + (eq_attr "type" "call")) + "slm-dual-1c") + +(define_insn_reservation "slm_callv" 1 + (and (eq_attr "cpu" "slm") + (eq_attr "type" "callv")) + "slm-dual-1c") + +(define_insn_reservation "slm_leave" 3 + (and (eq_attr "cpu" "slm") + (eq_attr "type" "leave")) + "slm-complex, slm-all-eu*2") + +(define_insn_reservation "slm_str" 3 + (and (eq_attr "cpu" "slm") + (eq_attr "type" "str")) + "slm-complex, slm-all-eu*2") + +(define_insn_reservation "slm_sselog" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sselog") + (eq_attr "memory" "none"))) + "slm-simple-either") + +(define_insn_reservation "slm_sselog_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sselog") + (eq_attr "memory" "!none"))) + "slm-simple-either") + +(define_insn_reservation "slm_sselog1" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sselog1") + (eq_attr "memory" "none"))) + "slm-simple-0") + +(define_insn_reservation "slm_sselog1_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sselog1") + (eq_attr "memory" "!none"))) + "slm-simple-0") + +;; not pmad, not psad +(define_insn_reservation "slm_sseiadd" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sseiadd") + (and (not (match_operand:V2DI 0 "register_operand")) + (and (eq_attr "atom_unit" "!simul") + (eq_attr "atom_unit" "!complex"))))) + "slm-simple-either") + +;; pmad, psad and 64 +(define_insn_reservation "slm_sseiadd_2" 4 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sseiadd") + (and (not (match_operand:V2DI 0 "register_operand")) + (and (eq_attr "atom_unit" "simul" ) + (eq_attr "mode" "DI"))))) + "slm-fmul-4c") + +;; pmad, psad and 128 +(define_insn_reservation "slm_sseiadd_3" 
5 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sseiadd") + (and (not (match_operand:V2DI 0 "register_operand")) + (and (eq_attr "atom_unit" "simul" ) + (eq_attr "mode" "TI"))))) + "slm-fmul-5c") + +;; if paddq(64 bit op), phadd/phsub +(define_insn_reservation "slm_sseiadd_4" 4 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sseiadd") + (ior (match_operand:V2DI 0 "register_operand") + (eq_attr "atom_unit" "complex")))) + "slm-fadd-4c") + +;; if immediate op. +(define_insn_reservation "slm_sseishft" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sseishft") + (and (eq_attr "atom_unit" "!sishuf") + (match_operand 2 "immediate_operand")))) + "slm-simple-either") + +;; if palignr or psrldq +(define_insn_reservation "slm_sseishft_2" 1 + (and (eq_attr "cpu" "slm") + (ior (eq_attr "type" "sseishft1") + (and (eq_attr "type" "sseishft") + (and (eq_attr "atom_unit" "sishuf") + (match_operand 2 "immediate_operand"))))) + "slm-simple-0") + +;; if reg/mem op +(define_insn_reservation "slm_sseishft_3" 2 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sseishft") + (not (match_operand 2 "immediate_operand")))) + "slm-complex, slm-all-eu") + +(define_insn_reservation "slm_sseimul" 5 + (and (eq_attr "cpu" "slm") + (eq_attr "type" "sseimul")) + "slm-fmul-5c") + +;; rcpss or rsqrtss +(define_insn_reservation "slm_sse" 4 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sse") + (and (eq_attr "atom_sse_attr" "rcp") (eq_attr "mode" "SF")))) + "slm-fmul-4c") + +;; movshdup, movsldup. Suggest to type sseishft +(define_insn_reservation "slm_sse_2" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sse") + (eq_attr "atom_sse_attr" "movdup"))) + "slm-simple-0") + +;; lfence +(define_insn_reservation "slm_sse_3" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sse") + (eq_attr "atom_sse_attr" "lfence"))) + "slm-simple-either") + +;; sfence,clflush,mfence, prefetch +(define_insn_reservation "slm_sse_4" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sse") + (ior (eq_attr "atom_sse_attr" "fence") + (eq_attr "atom_sse_attr" "prefetch")))) + "slm-simple-0") + +;; rcpps, rsqrtss, sqrt, ldmxcsr +(define_insn_reservation "slm_sse_5" 9 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sse") + (ior (ior (eq_attr "atom_sse_attr" "sqrt") + (eq_attr "atom_sse_attr" "mxcsr")) + (and (eq_attr "atom_sse_attr" "rcp") + (eq_attr "mode" "V4SF"))))) + "slm-complex, slm-all-eu*7, nothing") + +;; xmm->xmm +(define_insn_reservation "slm_ssemov" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ssemov") + (and (match_operand 0 "register_operand" "xy") + (match_operand 1 "register_operand" "xy")))) + "slm-simple-either") + +;; reg->xmm +(define_insn_reservation "slm_ssemov_2" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ssemov") + (and (match_operand 0 "register_operand" "xy") + (match_operand 1 "register_operand" "r")))) + "slm-simple-0") + +;; xmm->reg +(define_insn_reservation "slm_ssemov_3" 3 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ssemov") + (and (match_operand 0 "register_operand" "r") + (match_operand 1 "register_operand" "xy")))) + "slm-simple-0, nothing*2") + +;; mov mem +(define_insn_reservation "slm_ssemov_4" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ssemov") + (and (eq_attr "movu" "0") (eq_attr "memory" "!none")))) + "slm-simple-0") + +;; movu mem +(define_insn_reservation "slm_ssemov_5" 2 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ssemov") + (ior (eq_attr "movu" "1") (eq_attr "memory" "!none")))) + "slm-simple-0, nothing") + +;; no 
memory simple +(define_insn_reservation "slm_sseadd" 3 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sseadd") + (and (eq_attr "memory" "none") + (and (eq_attr "mode" "!V2DF") + (eq_attr "atom_unit" "!complex"))))) + "slm-fadd-3c") + +;; memory simple +(define_insn_reservation "slm_sseadd_mem" 3 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sseadd") + (and (eq_attr "memory" "!none") + (and (eq_attr "mode" "!V2DF") + (eq_attr "atom_unit" "!complex"))))) + "slm-fadd-3c") + +;; maxps, minps, *pd, hadd, hsub +(define_insn_reservation "slm_sseadd_3" 4 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sseadd") + (ior (eq_attr "mode" "V2DF") (eq_attr "atom_unit" "complex")))) + "slm-fadd-4c") + +;; Except dppd/dpps +(define_insn_reservation "slm_ssemul" 5 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ssemul") + (eq_attr "mode" "!SF"))) + "slm-fmul-5c") + +;; Except dppd/dpps, 4 cycle if mulss +(define_insn_reservation "slm_ssemul_2" 4 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ssemul") + (eq_attr "mode" "SF"))) + "slm-fmul-4c") + +(define_insn_reservation "slm_ssecmp" 1 + (and (eq_attr "cpu" "slm") + (eq_attr "type" "ssecmp")) + "slm-simple-either") + +(define_insn_reservation "slm_ssecomi" 1 + (and (eq_attr "cpu" "slm") + (eq_attr "type" "ssecomi")) + "slm-simple-0") + +;; no memory and cvtpi2ps, cvtps2pi, cvttps2pi +(define_insn_reservation "slm_ssecvt" 5 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ssecvt") + (ior (and (match_operand:V2SI 0 "register_operand") + (match_operand:V4SF 1 "register_operand")) + (and (match_operand:V4SF 0 "register_operand") + (match_operand:V2SI 1 "register_operand"))))) + "slm-fp-0, slm-feu-0, nothing*3") + +;; memory and cvtpi2ps, cvtps2pi, cvttps2pi +(define_insn_reservation "slm_ssecvt_mem" 5 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ssecvt") + (ior (and (match_operand:V2SI 0 "register_operand") + (match_operand:V4SF 1 "memory_operand")) + (and (match_operand:V4SF 0 "register_operand") + (match_operand:V2SI 1 "memory_operand"))))) +"slm-fp-0, slm-feu-0, nothing*3") + +;; cvtpd2pi, cvtpi2pd +(define_insn_reservation "slm_ssecvt_1" 2 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ssecvt") + (ior (and (match_operand:V2DF 0 "register_operand") + (match_operand:V2SI 1 "register_operand")) + (and (match_operand:V2SI 0 "register_operand") + (match_operand:V2DF 1 "register_operand"))))) + "slm-fp-0, slm-feu-0") + +;; memory and cvtpd2pi, cvtpi2pd +(define_insn_reservation "slm_ssecvt_1_mem" 2 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ssecvt") + (ior (and (match_operand:V2DF 0 "register_operand") + (match_operand:V2SI 1 "memory_operand")) + (and (match_operand:V2SI 0 "register_operand") + (match_operand:V2DF 1 "memory_operand"))))) + "slm-fp-0, slm-feu-0") + +;; otherwise. 4 cycles average for cvtss2sd +(define_insn_reservation "slm_ssecvt_3" 4 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "ssecvt") + (not (ior (and (match_operand:V2SI 0 "register_operand") + (match_operand:V4SF 1 "nonimmediate_operand")) + (and (match_operand:V4SF 0 "register_operand") + (match_operand:V2SI 1 "nonimmediate_operand")))))) + "slm-fp-0, nothing*3") + +;; memory and cvtsi2sd +(define_insn_reservation "slm_sseicvt" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sseicvt") + (and (match_operand:V2DF 0 "register_operand") + (match_operand:SI 1 "nonimmediate_operand")))) + "slm-fp-0") + +;; otherwise. 
8 cycles average for cvtsd2si +(define_insn_reservation "slm_sseicvt_2" 4 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "sseicvt") + (not (and (match_operand:V2DF 0 "register_operand") + (match_operand:SI 1 "memory_operand"))))) + "slm-fp-0, nothing*3") + +(define_insn_reservation "slm_ssediv" 13 + (and (eq_attr "cpu" "slm") + (eq_attr "type" "ssediv")) + "slm-fp-0, slm-feu-0*10, nothing*2") + +;; simple for fmov +(define_insn_reservation "slm_fmov" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "fmov") + (eq_attr "memory" "none"))) + "slm-simple-either") + +;; simple for fmov +(define_insn_reservation "slm_fmov_mem" 1 + (and (eq_attr "cpu" "slm") + (and (eq_attr "type" "fmov") + (eq_attr "memory" "!none"))) + "slm-simple-either") + +;; Define bypass here + +;; There will be 0 cycle stall from cmp/test to jcc + +;; There will be 1 cycle stall from flag producer to cmov and adc/sbb +(define_bypass 2 "slm_icmp, slm_test, slm_alu, slm_alu_carry, + slm_alu1, slm_negnot, slm_incdec, slm_ishift, + slm_ishift1, slm_rotate, slm_rotate1" + "slm_icmov, slm_alu_carry") + +;; lea to shift source stall is 1 cycle +(define_bypass 2 "slm_lea" + "slm_ishift, slm_ishift1, slm_rotate, slm_rotate1" + "!ix86_dep_by_shift_count") + +;; non-lea to shift count stall is 1 cycle +(define_bypass 2 "slm_alu_carry, + slm_alu,slm_alu1,slm_negnot,slm_imov,slm_imovx, + slm_incdec,slm_ishift,slm_ishift1,slm_rotate, + slm_rotate1, slm_setcc, slm_icmov, slm_pop, + slm_alu_mem, slm_alu_carry_mem, slm_alu1_mem, + slm_imovx_mem, slm_imovx_2_mem, + slm_imov_mem, slm_icmov_mem, slm_fmov_mem" + "slm_ishift, slm_ishift1, slm_rotate, slm_rotate1, + slm_ishift_mem, slm_ishift1_mem, + slm_rotate_mem, slm_rotate1_mem" + "ix86_dep_by_shift_count") diff --git a/libgcc/config/i386/cpuinfo.c b/libgcc/config/i386/cpuinfo.c index 0ad0074..39b9d11 100644 --- a/libgcc/config/i386/cpuinfo.c +++ b/libgcc/config/i386/cpuinfo.c @@ -50,6 +50,7 @@ enum processor_vendor enum processor_types { INTEL_ATOM = 1, + INTEL_SLM, INTEL_CORE2, INTEL_COREI7, AMDFAM10H, |
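
Usage note (not part of the patch): with these changes applied, Silvermont can be requested explicitly with -march=slm / -mtune=slm, configure accepts --with-cpu=slm, and the new "slm" entry in fold_builtin_cpu together with INTEL_SLM in libgcc's cpuinfo.c should let the CPU-dispatch builtins recognize the core. A small hedged example, assuming the builtins accept the new name the same way they accept "atom" or "corei7":

/* Assumes __builtin_cpu_is recognizes the "slm" name added by this patch.  */
#include <stdio.h>

int
main (void)
{
  __builtin_cpu_init ();
  if (__builtin_cpu_is ("slm"))
    printf ("running on a Silvermont CPU\n");
  else
    printf ("not a Silvermont CPU\n");
  return 0;
}

/* Build with, for example:
     gcc -O2 -march=slm -mtune=slm slm-test.c
   or rely on -march=native, which the driver-i386.c change teaches to
   report "slm" on Silvermont hardware.  */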