author     Martin Liska <mliska@suse.cz>  2022-11-08 12:36:43 +0100
committer  Martin Liska <mliska@suse.cz>  2022-11-08 12:36:43 +0100
commit     4b13c73bba935443be3207abf26f7ba05f79badc (patch)
tree       a6bb1525d07859fa8fc6f61dd13df7ddfd1ac254 /gcc/config
parent     33f5dde0cd15df9cf89b29280d4ff5fcf7b30e66 (diff)
parent     fa271afb58423014e2feef9f15c1a87428e64ddc (diff)
Merge branch 'master' into devel/sphinx
Diffstat (limited to 'gcc/config')
-rw-r--r--  gcc/config/bpf/bpf.cc            24
-rw-r--r--  gcc/config/i386/i386-expand.cc   86
-rw-r--r--  gcc/config/i386/i386-options.cc   1
-rw-r--r--  gcc/config/i386/i386.opt          4
-rw-r--r--  gcc/config/i386/predicates.md     7
-rw-r--r--  gcc/config/i386/sse.md           12
-rw-r--r--  gcc/config/i386/sync.md          27
-rw-r--r--  gcc/config/i386/x86-tune.def     71
8 files changed, 139 insertions, 93 deletions
diff --git a/gcc/config/bpf/bpf.cc b/gcc/config/bpf/bpf.cc
index ea8ca64..fd4003c 100644
--- a/gcc/config/bpf/bpf.cc
+++ b/gcc/config/bpf/bpf.cc
@@ -1731,7 +1731,6 @@ handle_attr_preserve (function *fn)
 {
   basic_block bb;
   rtx_insn *insn;
-  rtx_code_label *label;
   FOR_EACH_BB_FN (bb, fn)
     {
       FOR_BB_INSNS (bb, insn)
@@ -1762,28 +1761,7 @@ handle_attr_preserve (function *fn)
                 }
 
               if (is_attr_preserve_access (expr))
-                {
-                  auto_vec<unsigned int, 16> accessors;
-                  tree container = bpf_core_compute (expr, &accessors);
-                  if (accessors.length () < 1)
-                    continue;
-                  accessors.reverse ();
-
-                  container = TREE_TYPE (container);
-                  const char * section_name;
-                  if (DECL_SECTION_NAME (fn->decl))
-                    section_name = DECL_SECTION_NAME (fn->decl);
-                  else
-                    section_name = ".text";
-
-                  label = gen_label_rtx ();
-                  LABEL_PRESERVE_P (label) = 1;
-                  emit_label (label);
-
-                  /* Add the CO-RE relocation information to the BTF container.  */
-                  bpf_core_reloc_add (container, section_name, &accessors, label,
-                                      BPF_RELO_FIELD_BYTE_OFFSET);
-                }
+                maybe_make_core_relo (expr, BPF_RELO_FIELD_BYTE_OFFSET);
             }
         }
   rtx_insn *seq = get_insns ();
diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index 2e0d12c..9c92b07 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -4510,15 +4510,86 @@ ix86_expand_int_sse_cmp (rtx dest, enum rtx_code code, rtx cop0, rtx cop1,
     case GTU:
       break;
 
-    case NE:
     case LE:
     case LEU:
+      /* x <= cst can be handled as x < cst + 1 unless there is
+         wrap around in cst + 1.  */
+      if (GET_CODE (cop1) == CONST_VECTOR
+          && GET_MODE_INNER (mode) != TImode)
+        {
+          unsigned int n_elts = GET_MODE_NUNITS (mode), i;
+          machine_mode eltmode = GET_MODE_INNER (mode);
+          for (i = 0; i < n_elts; ++i)
+            {
+              rtx elt = CONST_VECTOR_ELT (cop1, i);
+              if (!CONST_INT_P (elt))
+                break;
+              if (code == GE)
+                {
+                  /* For LE punt if some element is signed maximum.  */
+                  if ((INTVAL (elt) & (GET_MODE_MASK (eltmode) >> 1))
+                      == (GET_MODE_MASK (eltmode) >> 1))
+                    break;
+                }
+              /* For LEU punt if some element is unsigned maximum.  */
+              else if (elt == constm1_rtx)
+                break;
+            }
+          if (i == n_elts)
+            {
+              rtvec v = rtvec_alloc (n_elts);
+              for (i = 0; i < n_elts; ++i)
+                RTVEC_ELT (v, i)
+                  = GEN_INT (INTVAL (CONST_VECTOR_ELT (cop1, i)) + 1);
+              cop1 = gen_rtx_CONST_VECTOR (mode, v);
+              std::swap (cop0, cop1);
+              code = code == LE ? GT : GTU;
+              break;
+            }
+        }
+      /* FALLTHRU */
+    case NE:
       code = reverse_condition (code);
       *negate = true;
       break;
 
     case GE:
     case GEU:
+      /* x >= cst can be handled as x > cst - 1 unless there is
+         wrap around in cst - 1.  */
+      if (GET_CODE (cop1) == CONST_VECTOR
+          && GET_MODE_INNER (mode) != TImode)
+        {
+          unsigned int n_elts = GET_MODE_NUNITS (mode), i;
+          machine_mode eltmode = GET_MODE_INNER (mode);
+          for (i = 0; i < n_elts; ++i)
+            {
+              rtx elt = CONST_VECTOR_ELT (cop1, i);
+              if (!CONST_INT_P (elt))
+                break;
+              if (code == GE)
+                {
+                  /* For GE punt if some element is signed minimum.  */
+                  if (INTVAL (elt) < 0
+                      && ((INTVAL (elt) & (GET_MODE_MASK (eltmode) >> 1))
+                          == 0))
+                    break;
+                }
+              /* For GEU punt if some element is zero.  */
+              else if (elt == const0_rtx)
+                break;
+            }
+          if (i == n_elts)
+            {
+              rtvec v = rtvec_alloc (n_elts);
+              for (i = 0; i < n_elts; ++i)
+                RTVEC_ELT (v, i)
+                  = GEN_INT (INTVAL (CONST_VECTOR_ELT (cop1, i)) - 1);
+              cop1 = gen_rtx_CONST_VECTOR (mode, v);
+              code = code == GE ? GT : GTU;
+              break;
+            }
+        }
       code = reverse_condition (code);
       *negate = true;
       /* FALLTHRU */
@@ -4556,6 +4627,11 @@ ix86_expand_int_sse_cmp (rtx dest, enum rtx_code code, rtx cop0, rtx cop1,
         }
     }
 
+  if (GET_CODE (cop0) == CONST_VECTOR)
+    cop0 = force_reg (mode, cop0);
+  else if (GET_CODE (cop1) == CONST_VECTOR)
+    cop1 = force_reg (mode, cop1);
+
   rtx optrue = op_true ? op_true : CONSTM1_RTX (data_mode);
   rtx opfalse = op_false ? op_false : CONST0_RTX (data_mode);
   if (*negate)
@@ -4752,13 +4828,13 @@ ix86_expand_int_sse_cmp (rtx dest, enum rtx_code code, rtx cop0, rtx cop1,
   if (*negate)
     std::swap (op_true, op_false);
 
+  if (GET_CODE (cop1) == CONST_VECTOR)
+    cop1 = force_reg (mode, cop1);
+
   /* Allow the comparison to be done in one mode, but the movcc to
      happen in another mode.  */
   if (data_mode == mode)
-    {
-      x = ix86_expand_sse_cmp (dest, code, cop0, cop1,
-                               op_true, op_false);
-    }
+    x = ix86_expand_sse_cmp (dest, code, cop0, cop1, op_true, op_false);
   else
     {
       gcc_assert (GET_MODE_SIZE (data_mode) == GET_MODE_SIZE (mode));
diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
index 23ab1f8..e5c77f3 100644
--- a/gcc/config/i386/i386-options.cc
+++ b/gcc/config/i386/i386-options.cc
@@ -139,6 +139,7 @@ along with GCC; see the file COPYING3.  If not see
 #define m_TREMONT (HOST_WIDE_INT_1U<<PROCESSOR_TREMONT)
 #define m_SIERRAFOREST (HOST_WIDE_INT_1U<<PROCESSOR_SIERRAFOREST)
 #define m_GRANDRIDGE (HOST_WIDE_INT_1U<<PROCESSOR_GRANDRIDGE)
+#define m_CORE_ATOM (m_SIERRAFOREST | m_GRANDRIDGE)
 #define m_INTEL (HOST_WIDE_INT_1U<<PROCESSOR_INTEL)
 
 #define m_LUJIAZUI (HOST_WIDE_INT_1U<<PROCESSOR_LUJIAZUI)
diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
index abb1e5e..415c52e 100644
--- a/gcc/config/i386/i386.opt
+++ b/gcc/config/i386/i386.opt
@@ -1246,7 +1246,3 @@ Support PREFETCHI built-in functions and code generation.
 mraoint
 Target Mask(ISA2_RAOINT) Var(ix86_isa_flags2) Save
 Support RAOINT built-in functions and code generation.
-
-mprefer-remote-atomic
-Target Var(flag_prefer_remote_atomic) Init(0)
-Prefer use remote atomic insn for atomic operations.
diff --git a/gcc/config/i386/predicates.md b/gcc/config/i386/predicates.md
index 2a3f072..f995503 100644
--- a/gcc/config/i386/predicates.md
+++ b/gcc/config/i386/predicates.md
@@ -1235,6 +1235,13 @@
   (ior (match_operand 0 "register_operand")
        (match_operand 0 "vector_memory_operand")))
 
+; Return true when OP is register_operand, vector_memory_operand
+; or const_vector.
+(define_predicate "vector_or_const_vector_operand"
+  (ior (match_operand 0 "register_operand")
+       (match_operand 0 "vector_memory_operand")
+       (match_code "const_vector")))
+
 (define_predicate "bcst_mem_operand"
   (and (match_code "vec_duplicate")
        (and (match_test "TARGET_AVX512F")
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index fa93ae7..9a4fc01 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -4311,7 +4311,7 @@
   [(set (match_operand:<sseintvecmode> 0 "register_operand")
         (match_operator:<sseintvecmode> 1 ""
           [(match_operand:VI_256 2 "register_operand")
-           (match_operand:VI_256 3 "nonimmediate_operand")]))]
+           (match_operand:VI_256 3 "nonimmediate_or_const_vector_operand")]))]
   "TARGET_AVX2"
 {
   bool ok = ix86_expand_int_vec_cmp (operands);
@@ -4323,7 +4323,7 @@
   [(set (match_operand:<sseintvecmode> 0 "register_operand")
         (match_operator:<sseintvecmode> 1 ""
           [(match_operand:VI124_128 2 "register_operand")
-           (match_operand:VI124_128 3 "vector_operand")]))]
+           (match_operand:VI124_128 3 "vector_or_const_vector_operand")]))]
   "TARGET_SSE2"
 {
   bool ok = ix86_expand_int_vec_cmp (operands);
@@ -4335,7 +4335,7 @@
   [(set (match_operand:V2DI 0 "register_operand")
         (match_operator:V2DI 1 ""
           [(match_operand:V2DI 2 "register_operand")
-           (match_operand:V2DI 3 "vector_operand")]))]
+           (match_operand:V2DI 3 "vector_or_const_vector_operand")]))]
   "TARGET_SSE4_2"
 {
   bool ok = ix86_expand_int_vec_cmp (operands);
@@ -4397,7 +4397,7 @@
   [(set (match_operand:<sseintvecmode> 0 "register_operand")
         (match_operator:<sseintvecmode> 1 ""
           [(match_operand:VI_256 2 "register_operand")
-           (match_operand:VI_256 3 "nonimmediate_operand")]))]
+           (match_operand:VI_256 3 "nonimmediate_or_const_vector_operand")]))]
   "TARGET_AVX2"
 {
   bool ok = ix86_expand_int_vec_cmp (operands);
@@ -4409,7 +4409,7 @@
   [(set (match_operand:<sseintvecmode> 0 "register_operand")
         (match_operator:<sseintvecmode> 1 ""
           [(match_operand:VI124_128 2 "register_operand")
-           (match_operand:VI124_128 3 "vector_operand")]))]
+           (match_operand:VI124_128 3 "vector_or_const_vector_operand")]))]
   "TARGET_SSE2"
 {
   bool ok = ix86_expand_int_vec_cmp (operands);
@@ -4421,7 +4421,7 @@
   [(set (match_operand:V2DI 0 "register_operand")
         (match_operator:V2DI 1 ""
           [(match_operand:V2DI 2 "register_operand")
-           (match_operand:V2DI 3 "vector_operand")]))]
+           (match_operand:V2DI 3 "vector_or_const_vector_operand")]))]
   "TARGET_SSE4_2"
 {
   bool ok = ix86_expand_int_vec_cmp (operands);
diff --git a/gcc/config/i386/sync.md b/gcc/config/i386/sync.md
index 2508991..e6543a5 100644
--- a/gcc/config/i386/sync.md
+++ b/gcc/config/i386/sync.md
@@ -791,28 +791,7 @@
 (define_code_iterator any_plus_logic [and ior xor plus])
 (define_code_attr plus_logic [(and "and") (ior "or") (xor "xor") (plus "add")])
 
-(define_expand "atomic_<plus_logic><mode>"
-  [(match_operand:SWI 0 "memory_operand")
-   (any_plus_logic:SWI (match_dup 0)
-                       (match_operand:SWI 1 "nonmemory_operand"))
-   (match_operand:SI 2 "const_int_operand")]
-  ""
-{
-  if (flag_prefer_remote_atomic
-      && TARGET_RAOINT && operands[2] == const0_rtx
-      && (<MODE>mode == SImode || <MODE>mode == DImode))
-    {
-      if (CONST_INT_P (operands[1]))
-        operands[1] = force_reg (<MODE>mode, operands[1]);
-      emit_insn (maybe_gen_rao_a (<CODE>, <MODE>mode, operands[0], operands[1]));
-    }
-  else
-    emit_insn (gen_atomic_<plus_logic><mode>_1 (operands[0], operands[1],
-                                                operands[2]));
-  DONE;
-})
-
-(define_insn "@rao_a<plus_logic><mode>"
+(define_insn "rao_a<plus_logic><mode>"
   [(set (match_operand:SWI48 0 "memory_operand" "+m")
         (unspec_volatile:SWI48
           [(any_plus_logic:SWI48 (match_dup 0)
@@ -822,7 +801,7 @@
   "TARGET_RAOINT"
   "a<plus_logic>\t{%1, %0|%0, %1}")
 
-(define_insn "atomic_add<mode>_1"
+(define_insn "atomic_add<mode>"
   [(set (match_operand:SWI 0 "memory_operand" "+m")
         (unspec_volatile:SWI
           [(plus:SWI (match_dup 0)
@@ -876,7 +855,7 @@
   return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";
 })
 
-(define_insn "atomic_<logic><mode>_1"
+(define_insn "atomic_<logic><mode>"
   [(set (match_operand:SWI 0 "memory_operand" "+m")
         (unspec_volatile:SWI
           [(any_logic:SWI (match_dup 0)
diff --git a/gcc/config/i386/x86-tune.def b/gcc/config/i386/x86-tune.def
index 540e45d..58e29e7 100644
--- a/gcc/config/i386/x86-tune.def
+++ b/gcc/config/i386/x86-tune.def
@@ -42,7 +42,8 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
 DEF_TUNE (X86_TUNE_SCHEDULE, "schedule",
          m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL | m_SILVERMONT
          | m_INTEL | m_KNL | m_KNM | m_K6_GEODE | m_AMD_MULTIPLE | m_LUJIAZUI
-         | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_GENERIC)
+         | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_CORE_ATOM
+         | m_GENERIC)
 
 /* X86_TUNE_PARTIAL_REG_DEPENDENCY: Enable more register renaming
    on modern chips.  Prefer stores affecting whole integer register
@@ -52,7 +53,7 @@ DEF_TUNE (X86_TUNE_PARTIAL_REG_DEPENDENCY, "partial_reg_dependency",
          m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2
          | m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_INTEL
          | m_KNL | m_KNM | m_AMD_MULTIPLE | m_LUJIAZUI | m_TREMONT
-         | m_ALDERLAKE | m_GENERIC)
+         | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC)
 
 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: This knob promotes all store
    destinations to be 128bit to allow register renaming on 128bit SSE units,
@@ -63,7 +64,7 @@ DEF_TUNE (X86_TUNE_PARTIAL_REG_DEPENDENCY, "partial_reg_dependency",
 DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY, "sse_partial_reg_dependency",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10
          | m_BDVER | m_ZNVER | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE
-         | m_GENERIC)
+         | m_CORE_ATOM | m_GENERIC)
 
 /* X86_TUNE_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY: This knob avoids
    partial write to the destination in scalar SSE conversion from FP
@@ -71,20 +72,23 @@ DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY, "sse_partial_reg_dependency",
 DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY,
          "sse_partial_reg_fp_converts_dependency",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10
-         | m_BDVER | m_ZNVER | m_LUJIAZUI | m_ALDERLAKE | m_GENERIC)
+         | m_BDVER | m_ZNVER | m_LUJIAZUI | m_ALDERLAKE | m_CORE_ATOM
+         | m_GENERIC)
 
 /* X86_TUNE_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY: This knob avoids partial
    write to the destination in scalar SSE conversion from integer to FP.  */
 DEF_TUNE (X86_TUNE_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY,
          "sse_partial_reg_converts_dependency",
         m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_AMDFAM10
-         | m_BDVER | m_ZNVER | m_LUJIAZUI | m_ALDERLAKE | m_GENERIC)
+         | m_BDVER | m_ZNVER | m_LUJIAZUI | m_ALDERLAKE | m_CORE_ATOM
+         | m_GENERIC)
 
 /* X86_TUNE_DEST_FALSE_DEP_FOR_GLC: This knob inserts zero-idiom before
    several insns to break false dependency on the dest register
    for GLC micro-architecture.  */
 DEF_TUNE (X86_TUNE_DEST_FALSE_DEP_FOR_GLC,
-          "dest_false_dep_for_glc", m_SAPPHIRERAPIDS | m_ALDERLAKE)
+          "dest_false_dep_for_glc", m_SAPPHIRERAPIDS | m_ALDERLAKE
+          | m_CORE_ATOM)
 
 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
    are resolved on SSE register parts instead of whole registers, so we may
@@ -110,14 +114,14 @@ DEF_TUNE (X86_TUNE_MOVX, "movx",
          m_PPRO | m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE
          | m_BONNELL | m_SILVERMONT | m_GOLDMONT | m_KNL | m_KNM | m_INTEL
          | m_GOLDMONT_PLUS | m_GEODE | m_AMD_MULTIPLE | m_LUJIAZUI
-         | m_CORE_AVX2 | m_TREMONT | m_ALDERLAKE | m_GENERIC)
+         | m_CORE_AVX2 | m_TREMONT | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC)
 
 /* X86_TUNE_MEMORY_MISMATCH_STALL: Avoid partial stores that are followed by
    full sized loads.  */
 DEF_TUNE (X86_TUNE_MEMORY_MISMATCH_STALL, "memory_mismatch_stall",
          m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_INTEL
          | m_KNL | m_KNM | m_GOLDMONT | m_GOLDMONT_PLUS | m_AMD_MULTIPLE
-         | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE | m_GENERIC)
+         | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC)
 
 /* X86_TUNE_FUSE_CMP_AND_BRANCH_32: Fuse compare with a subsequent
    conditional jump instruction for 32 bit TARGET.  */
@@ -173,14 +177,14 @@ DEF_TUNE (X86_TUNE_EPILOGUE_USING_MOVE, "epilogue_using_move",
 
 /* X86_TUNE_USE_LEAVE: Use "leave" instruction in epilogues where it fits.  */
 DEF_TUNE (X86_TUNE_USE_LEAVE, "use_leave",
          m_386 | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE | m_LUJIAZUI
-         | m_TREMONT | m_ALDERLAKE | m_GENERIC)
+         | m_TREMONT | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC)
 
 /* X86_TUNE_PUSH_MEMORY: Enable generation of "push mem" instructions.
    Some chips, like 486 and Pentium works faster with separate load
    and push instructions.  */
 DEF_TUNE (X86_TUNE_PUSH_MEMORY, "push_memory",
          m_386 | m_P4_NOCONA | m_CORE_ALL | m_K6_GEODE | m_AMD_MULTIPLE
-         | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE | m_GENERIC)
+         | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC)
 
 /* X86_TUNE_SINGLE_PUSH: Enable if single push insn is preferred
    over esp subtraction.  */
@@ -250,15 +254,16 @@ DEF_TUNE (X86_TUNE_READ_MODIFY, "read_modify", ~(m_PENT | m_LAKEMONT | m_PPRO))
 DEF_TUNE (X86_TUNE_USE_INCDEC, "use_incdec",
          ~(m_P4_NOCONA | m_CORE2 | m_NEHALEM | m_SANDYBRIDGE | m_BONNELL
            | m_SILVERMONT | m_INTEL | m_KNL | m_KNM | m_GOLDMONT
-           | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_LUJIAZUI
-           | m_GENERIC))
+           | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_CORE_ATOM
+           | m_LUJIAZUI | m_GENERIC))
 
 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
    for DFmode copies */
 DEF_TUNE (X86_TUNE_INTEGER_DFMODE_MOVES, "integer_dfmode_moves",
          ~(m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
            | m_KNL | m_KNM | m_INTEL | m_GEODE | m_AMD_MULTIPLE | m_LUJIAZUI
-           | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_GENERIC))
+           | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE
+           | m_CORE_ATOM | m_GENERIC))
 
 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
    will impact LEA instruction selection. */
@@ -296,7 +301,8 @@ DEF_TUNE (X86_TUNE_SINGLE_STRINGOP, "single_stringop", m_386 | m_P4_NOCONA)
    move/set sequences of bytes with known size.  */
 DEF_TUNE (X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB,
          "prefer_known_rep_movsb_stosb",
-         m_SKYLAKE | m_ALDERLAKE | m_TREMONT | m_CORE_AVX512 | m_LUJIAZUI)
+         m_SKYLAKE | m_ALDERLAKE | m_CORE_ATOM | m_TREMONT | m_CORE_AVX512
+         | m_LUJIAZUI)
 
 /* X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES: Enable generation of
    compact prologues and epilogues by issuing a misaligned moves.  This
@@ -306,14 +312,14 @@ DEF_TUNE (X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB,
 DEF_TUNE (X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES,
          "misaligned_move_string_pro_epilogues",
          m_386 | m_486 | m_CORE_ALL | m_AMD_MULTIPLE | m_LUJIAZUI | m_TREMONT
-         | m_ALDERLAKE | m_GENERIC)
+         | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC)
 
 /* X86_TUNE_USE_SAHF: Controls use of SAHF.  */
 DEF_TUNE (X86_TUNE_USE_SAHF, "use_sahf",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_KNL
          | m_KNM | m_INTEL | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER
         | m_BTVER | m_ZNVER | m_LUJIAZUI | m_GOLDMONT | m_GOLDMONT_PLUS
-         | m_TREMONT | m_ALDERLAKE | m_GENERIC)
+         | m_TREMONT | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC)
 
 /* X86_TUNE_USE_CLTD: Controls use of CLTD and CTQO instructions.  */
 DEF_TUNE (X86_TUNE_USE_CLTD, "use_cltd",
@@ -324,13 +330,13 @@ DEF_TUNE (X86_TUNE_USE_CLTD, "use_cltd",
 DEF_TUNE (X86_TUNE_USE_BT, "use_bt",
          m_CORE_ALL | m_BONNELL | m_SILVERMONT | m_KNL | m_KNM | m_INTEL
          | m_LAKEMONT | m_AMD_MULTIPLE | m_LUJIAZUI | m_GOLDMONT
-         | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_GENERIC)
+         | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC)
 
 /* X86_TUNE_AVOID_FALSE_DEP_FOR_BMI: Avoid false dependency
    for bit-manipulation instructions.  */
 DEF_TUNE (X86_TUNE_AVOID_FALSE_DEP_FOR_BMI, "avoid_false_dep_for_bmi",
-         m_SANDYBRIDGE | m_CORE_AVX2 | m_TREMONT | m_ALDERLAKE | m_LUJIAZUI
-         | m_GENERIC)
+         m_SANDYBRIDGE | m_CORE_AVX2 | m_TREMONT | m_ALDERLAKE | m_CORE_ATOM
+         | m_LUJIAZUI | m_GENERIC)
 
 /* X86_TUNE_ADJUST_UNROLL: This enables adjusting the unroll factor based
    on hardware capabilities. Bdver3 hardware has a loop buffer which makes
@@ -342,12 +348,13 @@ DEF_TUNE (X86_TUNE_ADJUST_UNROLL, "adjust_unroll_factor", m_BDVER3 | m_BDVER4)
    if-converted sequence to one.  */
 DEF_TUNE (X86_TUNE_ONE_IF_CONV_INSN, "one_if_conv_insn",
          m_SILVERMONT | m_KNL | m_KNM | m_INTEL | m_CORE_ALL | m_GOLDMONT
-         | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_LUJIAZUI | m_GENERIC)
+         | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_CORE_ATOM | m_LUJIAZUI
+         | m_GENERIC)
 
 /* X86_TUNE_AVOID_MFENCE: Use lock prefixed instructions instead of mfence.  */
 DEF_TUNE (X86_TUNE_AVOID_MFENCE, "avoid_mfence",
          m_CORE_ALL | m_BDVER | m_ZNVER | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE
-         | m_GENERIC)
+         | m_CORE_ATOM | m_GENERIC)
 
 /* X86_TUNE_EXPAND_ABS: This enables a new abs pattern by
    generating instructions for abs (x) = (((signed) x >> (W-1) ^ x) -
@@ -372,7 +379,7 @@ DEF_TUNE (X86_TUNE_USE_SIMODE_FIOP, "use_simode_fiop",
          ~(m_PENT | m_LAKEMONT | m_PPRO | m_CORE_ALL | m_BONNELL
            | m_SILVERMONT | m_KNL | m_KNM | m_INTEL | m_AMD_MULTIPLE
            | m_LUJIAZUI | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT
-           | m_ALDERLAKE | m_GENERIC))
+           | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC))
 
 /* X86_TUNE_USE_FFREEP: Use freep instruction instead of fstp.  */
 DEF_TUNE (X86_TUNE_USE_FFREEP, "use_ffreep", m_AMD_MULTIPLE | m_LUJIAZUI)
@@ -381,7 +388,8 @@ DEF_TUNE (X86_TUNE_USE_FFREEP, "use_ffreep", m_AMD_MULTIPLE | m_LUJIAZUI)
 DEF_TUNE (X86_TUNE_EXT_80387_CONSTANTS, "ext_80387_constants",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BONNELL | m_SILVERMONT
          | m_KNL | m_KNM | m_INTEL | m_K6_GEODE | m_ATHLON_K8 | m_LUJIAZUI
-         | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_GENERIC)
+         | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE | m_CORE_ATOM
+         | m_GENERIC)
 
 /*****************************************************************************/
 /* SSE instruction selection tuning                                          */
@@ -397,14 +405,15 @@ DEF_TUNE (X86_TUNE_GENERAL_REGS_SSE_SPILL, "general_regs_sse_spill",
 DEF_TUNE (X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL, "sse_unaligned_load_optimal",
          m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_SILVERMONT | m_KNL | m_KNM
          | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE
-         | m_AMDFAM10 | m_BDVER | m_BTVER | m_ZNVER | m_LUJIAZUI | m_GENERIC)
+         | m_CORE_ATOM | m_AMDFAM10 | m_BDVER | m_BTVER | m_ZNVER | m_LUJIAZUI
+         | m_GENERIC)
 
 /* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL: Use movups for misaligned stores
    instead of a sequence loading registers by parts.  */
 DEF_TUNE (X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL, "sse_unaligned_store_optimal",
          m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2 | m_SILVERMONT | m_KNL | m_KNM
          | m_INTEL | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE
-         | m_BDVER | m_ZNVER | m_LUJIAZUI | m_GENERIC)
+         | m_CORE_ATOM | m_BDVER | m_ZNVER | m_LUJIAZUI | m_GENERIC)
 
 /* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL: Use packed single precision
    128bit instructions instead of double where possible.  */
@@ -414,13 +423,13 @@ DEF_TUNE (X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL, "sse_packed_single_insn_optim
 /* X86_TUNE_SSE_TYPELESS_STORES: Always movaps/movups for 128bit stores.  */
 DEF_TUNE (X86_TUNE_SSE_TYPELESS_STORES, "sse_typeless_stores",
          m_AMD_MULTIPLE | m_LUJIAZUI | m_CORE_ALL | m_TREMONT | m_ALDERLAKE
-         | m_GENERIC)
+         | m_CORE_ATOM | m_GENERIC)
 
 /* X86_TUNE_SSE_LOAD0_BY_PXOR: Always use pxor to load0 as opposed to
    xorps/xorpd and other variants.  */
 DEF_TUNE (X86_TUNE_SSE_LOAD0_BY_PXOR, "sse_load0_by_pxor",
          m_PPRO | m_P4_NOCONA | m_CORE_ALL | m_BDVER | m_BTVER | m_ZNVER
-         | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE | m_GENERIC)
+         | m_LUJIAZUI | m_TREMONT | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC)
 
 /* X86_TUNE_INTER_UNIT_MOVES_TO_VEC: Enable moves in from integer
    to SSE registers.  If disabled, the moves will be done by storing
@@ -467,22 +476,22 @@ DEF_TUNE (X86_TUNE_SLOW_PSHUFB, "slow_pshufb",
 /* X86_TUNE_AVOID_4BYTE_PREFIXES: Avoid instructions requiring 4+ bytes of prefixes.  */
 DEF_TUNE (X86_TUNE_AVOID_4BYTE_PREFIXES, "avoid_4byte_prefixes",
          m_SILVERMONT | m_GOLDMONT | m_GOLDMONT_PLUS | m_TREMONT | m_ALDERLAKE
-         | m_INTEL)
+         | m_CORE_ATOM | m_INTEL)
 
 /* X86_TUNE_USE_GATHER_2PARTS: Use gather instructions for vectors with 2
    elements.  */
 DEF_TUNE (X86_TUNE_USE_GATHER_2PARTS, "use_gather_2parts",
-         ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ALDERLAKE | m_GENERIC))
+         ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC))
 
 /* X86_TUNE_USE_GATHER_4PARTS: Use gather instructions for vectors with 4
    elements.  */
 DEF_TUNE (X86_TUNE_USE_GATHER_4PARTS, "use_gather_4parts",
-         ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ALDERLAKE | m_GENERIC))
+         ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER3 | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC))
 
 /* X86_TUNE_USE_GATHER: Use gather instructions for vectors with 8 or more
    elements.  */
 DEF_TUNE (X86_TUNE_USE_GATHER, "use_gather",
-         ~(m_ZNVER1 | m_ZNVER2 | m_ALDERLAKE | m_GENERIC))
+         ~(m_ZNVER1 | m_ZNVER2 | m_ALDERLAKE | m_CORE_ATOM | m_GENERIC))
 
 /* X86_TUNE_AVOID_128FMA_CHAINS: Avoid creating loops with tight 128bit or
    smaller FMA chain.  */
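As an aside, the new LE/LEU and GE/GEU handling in ix86_expand_int_sse_cmp above rests on the scalar identities "x <= c is the same as x < c + 1" and "x >= c is the same as x > c - 1", which hold only when c + 1 (respectively c - 1) does not wrap around; those are exactly the cases the added loops punt on (signed or unsigned maximum for LE/LEU, signed minimum or zero for GE/GEU). A minimal standalone C++ check of the signed LE identity, exhaustive over 8-bit constants (a hypothetical test program written for illustration, not code from this commit):

#include <cstdint>
#include <cstdio>

int main ()
{
  /* For every signed 8-bit constant c except INT8_MAX, verify that
     x <= c and x < c + 1 agree for every 8-bit value x.  c == INT8_MAX
     is skipped because c + 1 would wrap around, which is the case the
     patch refuses to transform.  */
  for (int c = INT8_MIN; c < INT8_MAX; ++c)
    for (int x = INT8_MIN; x <= INT8_MAX; ++x)
      if ((x <= c) != (x < c + 1))
        {
          std::printf ("identity fails: c=%d x=%d\n", c, x);
          return 1;
        }
  std::printf ("x <= c matches x < c + 1 for all signed 8-bit c != INT8_MAX\n");
  return 0;
}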