aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--clang/docs/AMDGPUSupport.rst4
-rw-r--r--clang/docs/HIPSupport.rst3
-rw-r--r--clang/docs/Modules.rst17
-rw-r--r--clang/docs/OpenMPSupport.rst1324
-rw-r--r--clang/docs/ReleaseNotes.rst1
-rw-r--r--clang/include/clang/AST/OpenMPClause.h80
-rw-r--r--clang/include/clang/AST/RecursiveASTVisitor.h6
-rw-r--r--clang/include/clang/Basic/Builtins.def1
-rw-r--r--clang/include/clang/Basic/BuiltinsAMDGPU.def41
-rw-r--r--clang/include/clang/Basic/OpenMPKinds.def8
-rw-r--r--clang/include/clang/Basic/OpenMPKinds.h7
-rw-r--r--clang/include/clang/Sema/SemaOpenMP.h6
-rw-r--r--clang/lib/AST/ASTContext.cpp5
-rw-r--r--clang/lib/AST/OpenMPClause.cpp8
-rw-r--r--clang/lib/AST/StmtProfile.cpp2
-rw-r--r--clang/lib/Basic/OpenMPKinds.cpp19
-rw-r--r--clang/lib/Basic/Targets/AMDGPU.cpp6
-rw-r--r--clang/lib/CodeGen/CGDebugInfo.cpp22
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntime.cpp6
-rw-r--r--clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp81
-rw-r--r--clang/lib/CodeGen/TargetBuiltins/ARM.cpp249
-rw-r--r--clang/lib/Driver/ToolChains/ZOS.cpp4
-rw-r--r--clang/lib/Frontend/TextDiagnostic.cpp474
-rw-r--r--clang/lib/Lex/HeaderSearch.cpp4
-rw-r--r--clang/lib/Parse/ParseOpenMP.cpp1
-rw-r--r--clang/lib/Sema/SemaAMDGPU.cpp43
-rw-r--r--clang/lib/Sema/SemaOpenMP.cpp21
-rw-r--r--clang/lib/Sema/TreeTransform.h7
-rw-r--r--clang/lib/Serialization/ASTReader.cpp14
-rw-r--r--clang/lib/Serialization/ASTWriter.cpp6
-rw-r--r--clang/test/CIR/CodeGen/builtin_prefetch.c (renamed from clang/test/CIR/CodeGen/builtin_prefetech.c)0
-rw-r--r--clang/test/CodeGen/AArch64/neon-across.c132
-rw-r--r--clang/test/CodeGen/AArch64/neon-intrinsics.c20
-rw-r--r--clang/test/CodeGen/builtins-extended-image.c1528
-rw-r--r--clang/test/CodeGenCXX/ubsan-coroutines.cpp1
-rw-r--r--clang/test/CodeGenHIP/maybe_undef-attr-verify.hip2
-rw-r--r--clang/test/CodeGenOpenCL/builtins-amdgcn-wave32.cl6
-rw-r--r--clang/test/CodeGenOpenCL/builtins-amdgcn-wave64.cl4
-rw-r--r--clang/test/DebugInfo/Generic/bit-int.c4
-rw-r--r--clang/test/DebugInfo/Generic/macro-info.c35
-rw-r--r--clang/test/Driver/amdgpu-macros.cl16
-rw-r--r--clang/test/Driver/fat-archive-unbundle-ext.c2
-rw-r--r--clang/test/Driver/hip-macros.hip23
-rw-r--r--clang/test/Driver/hip-wavefront-size-deprecation-diagnostics.hip115
-rw-r--r--clang/test/Headers/cuda_with_openmp.cu2
-rw-r--r--clang/test/OpenMP/task_ast_print.cpp26
-rw-r--r--clang/test/OpenMP/task_codegen.cpp33
-rwxr-xr-xclang/test/OpenMP/task_threadset_messages.cpp99
-rw-r--r--clang/test/OpenMP/taskloop_ast_print.cpp16
-rw-r--r--clang/test/OpenMP/taskloop_codegen.cpp53
-rw-r--r--clang/test/Preprocessor/predefined-arch-macros.c2
-rw-r--r--clang/test/SemaOpenCL/builtins-extended-image-param-gfx1100-err.cl227
-rw-r--r--clang/test/SemaOpenCL/builtins-extended-image-param-gfx942-err.cl227
-rw-r--r--clang/tools/libclang/CIndex.cpp2
-rwxr-xr-xclang/www/cxx_dr_status.html260
-rw-r--r--compiler-rt/test/asan/TestCases/log-path_test.cpp3
-rw-r--r--compiler-rt/test/asan/TestCases/scariness_score_test.cpp4
-rw-r--r--compiler-rt/test/asan/lit.cfg.py3
-rw-r--r--compiler-rt/test/lit.common.cfg.py2
-rw-r--r--compiler-rt/test/profile/Linux/instrprof-debug-info-correlate-warnings.c2
-rw-r--r--flang/include/flang/Lower/OpenMP/Clauses.h1
-rw-r--r--flang/include/flang/Parser/dump-parse-tree.h2
-rw-r--r--flang/include/flang/Parser/parse-tree.h8
-rw-r--r--flang/include/flang/Semantics/dump-expr.h3
-rw-r--r--flang/lib/Lower/OpenMP/Clauses.cpp15
-rw-r--r--flang/lib/Parser/prescan.cpp2
-rw-r--r--flang/lib/Semantics/check-omp-structure.cpp1
-rw-r--r--flang/test/Parser/inline-directives.f9029
-rw-r--r--libc/CMakeLists.txt2
-rw-r--r--libc/config/linux/x86_64/exclude.txt8
-rw-r--r--libc/include/locale.yaml2
-rw-r--r--libc/include/stdio.yaml2
-rw-r--r--libc/include/stdlib.yaml2
-rw-r--r--libc/include/string.yaml2
-rw-r--r--libc/include/time.yaml2
-rw-r--r--libc/include/wchar.yaml8
-rw-r--r--libc/src/time/strftime.cpp2
-rw-r--r--libc/src/time/strftime_l.cpp2
-rw-r--r--libc/test/src/time/strftime_test.cpp20
-rw-r--r--libc/utils/hdrgen/hdrgen/enumeration.py16
-rw-r--r--libc/utils/hdrgen/hdrgen/function.py16
-rw-r--r--libc/utils/hdrgen/hdrgen/header.py81
-rw-r--r--libc/utils/hdrgen/hdrgen/macro.py16
-rwxr-xr-xlibc/utils/hdrgen/hdrgen/main.py1
-rw-r--r--libc/utils/hdrgen/hdrgen/object.py16
-rw-r--r--libc/utils/hdrgen/hdrgen/symbol.py41
-rw-r--r--libc/utils/hdrgen/hdrgen/type.py20
-rw-r--r--libc/utils/hdrgen/hdrgen/yaml_to_classes.py2
-rw-r--r--libc/utils/hdrgen/tests/expected_output/custom.h21
-rw-r--r--libc/utils/hdrgen/tests/expected_output/sorting.h24
-rw-r--r--libc/utils/hdrgen/tests/expected_output/test_header.h1
-rw-r--r--libc/utils/hdrgen/tests/expected_output/test_small.json1
-rw-r--r--libc/utils/hdrgen/tests/input/custom-common.yaml6
-rw-r--r--libc/utils/hdrgen/tests/input/custom.yaml13
-rw-r--r--libc/utils/hdrgen/tests/input/sorting.yaml20
-rw-r--r--libc/utils/hdrgen/tests/test_integration.py14
-rw-r--r--libcxx/include/__config11
-rw-r--r--libcxx/include/__configuration/abi.h8
-rw-r--r--libcxx/include/__format/format_arg.h17
-rw-r--r--libcxx/include/__format/format_context.h4
-rw-r--r--libcxx/include/__hash_table12
-rw-r--r--libcxx/include/__iterator/concepts.h15
-rw-r--r--libcxx/include/__math/traits.h15
-rw-r--r--libcxx/include/__ranges/transform_view.h3
-rw-r--r--libcxx/include/__tree12
-rw-r--r--libcxx/include/__type_traits/reference_constructs_from_temporary.h6
-rw-r--r--libcxx/include/forward_list11
-rw-r--r--libcxx/include/list11
-rw-r--r--libcxx/include/tuple4
-rw-r--r--libcxx/include/variant2
-rw-r--r--libcxx/test/libcxx-03/utilities/meta/is_referenceable.compile.pass.cpp2
-rw-r--r--libcxx/test/libcxx/input.output/iostreams.base/ios.base/ios.base.cons/dtor.uninitialized.pass.cpp6
-rw-r--r--libcxx/test/libcxx/numerics/c.math/constexpr-cxx23-clang.pass.cpp7
-rw-r--r--libcxx/test/libcxx/utilities/expected/expected.expected/transform_error.mandates.verify.cpp27
-rw-r--r--libcxx/test/libcxx/utilities/expected/expected.void/transform_error.mandates.verify.cpp27
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/filebuf.virtuals/setbuf.pass.cpp6
-rw-r--r--libcxx/test/std/input.output/iostream.format/input.streams/istream.unformatted/sync.pass.cpp6
-rw-r--r--libcxx/test/std/localization/locale.categories/category.collate/locale.collate.byname/compare.pass.cpp16
-rw-r--r--libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_fr_FR.pass.cpp5
-rw-r--r--libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_ru_RU.pass.cpp3
-rw-r--r--libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_zh_CN.pass.cpp27
-rw-r--r--libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_fr_FR.pass.cpp5
-rw-r--r--libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_ru_RU.pass.cpp3
-rw-r--r--libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_zh_CN.pass.cpp43
-rw-r--r--libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/curr_symbol.pass.cpp15
-rw-r--r--libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/grouping.pass.cpp5
-rw-r--r--libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/neg_format.pass.cpp35
-rw-r--r--libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/pos_format.pass.cpp10
-rw-r--r--libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp6
-rw-r--r--libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp6
-rw-r--r--libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp6
-rw-r--r--libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/grouping.pass.cpp7
-rw-r--r--libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/thousands_sep.pass.cpp5
-rw-r--r--libcxx/test/std/time/time.duration/time.duration.nonmember/ostream.pass.cpp10
-rw-r--r--libcxx/test/std/time/time.syn/formatter.duration.pass.cpp51
-rw-r--r--libcxx/test/std/time/time.syn/formatter.file_time.pass.cpp19
-rw-r--r--libcxx/test/std/time/time.syn/formatter.hh_mm_ss.pass.cpp35
-rw-r--r--libcxx/test/std/time/time.syn/formatter.local_time.pass.cpp19
-rw-r--r--libcxx/test/std/time/time.syn/formatter.sys_time.pass.cpp19
-rw-r--r--libcxx/test/std/utilities/format/format.arguments/format.arg/visit.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.arguments/format.arg/visit.return_type.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.arguments/format.arg/visit_format_arg.deprecated.verify.cpp1
-rw-r--r--libcxx/test/std/utilities/format/format.arguments/format.arg/visit_format_arg.pass.cpp6
-rw-r--r--libcxx/test/std/utilities/format/format.arguments/format.args/get.pass.cpp6
-rw-r--r--libcxx/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR20855_tuple_ref_binding_diagnostics.pass.cpp22
-rw-r--r--libcxx/test/std/utilities/variant/variant.visit.member/robust_against_adl.pass.cpp1
-rw-r--r--libcxx/test/std/utilities/variant/variant.visit.member/visit.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp2
-rw-r--r--libcxx/test/support/locale_helpers.h12
-rw-r--r--libcxx/test/support/test_basic_format_arg.h2
-rw-r--r--libcxx/test/support/test_macros.h7
-rw-r--r--libcxxabi/test/uncaught_exception.pass.cpp6
-rw-r--r--lld/ELF/SyntheticSections.cpp7
-rw-r--r--lld/ELF/SyntheticSections.h4
-rw-r--r--lld/ELF/Writer.cpp7
-rw-r--r--lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py2
-rw-r--r--lldb/source/Host/windows/ProcessLauncherWindows.cpp17
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp4
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h1
-rw-r--r--lldb/test/API/commands/register/register/aarch64_dynamic_regset/TestArm64DynamicRegsets.py7
-rw-r--r--lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/optional/TestDataFormatterLibcxxOptionalSimulator.py2
-rw-r--r--lldb/test/API/lang/cpp/libcxx-internals-recognizer/TestLibcxxInternalsRecognizer.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py1
-rw-r--r--lldb/unittests/SymbolFile/DWARF/DWARFASTParserClangTests.cpp90
-rw-r--r--llvm/docs/CommandGuide/llvm-cxxfilt.rst5
-rw-r--r--llvm/docs/DeveloperPolicy.rst49
-rw-r--r--llvm/docs/GettingInvolved.rst8
-rw-r--r--llvm/include/llvm/ADT/GenericCycleImpl.h11
-rw-r--r--llvm/include/llvm/ADT/GenericCycleInfo.h1
-rw-r--r--llvm/include/llvm/ADT/TypeSwitch.h17
-rw-r--r--llvm/include/llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h3
-rw-r--r--llvm/include/llvm/Frontend/OpenMP/ClauseT.h14
-rw-r--r--llvm/include/llvm/Frontend/OpenMP/OMP.td6
-rw-r--r--llvm/include/llvm/Support/GenericLoopInfo.h11
-rw-r--r--llvm/include/llvm/Support/GenericLoopInfoImpl.h32
-rw-r--r--llvm/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h2
-rw-r--r--llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h29
-rw-r--r--llvm/include/llvm/Transforms/Utils/ControlFlowUtils.h6
-rw-r--r--llvm/lib/Analysis/DependenceAnalysis.cpp22
-rw-r--r--llvm/lib/Frontend/Driver/CodeGenOptions.cpp4
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp20
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp15
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h1
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp14
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h3
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp5
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.cpp10
-rw-r--r--llvm/lib/Target/ARM/ARMISelLowering.cpp5
-rw-r--r--llvm/lib/Target/ARM/ARMISelLowering.h2
-rw-r--r--llvm/lib/Target/Hexagon/HexagonSubtarget.cpp2
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp1
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp14
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoP.td5
-rw-r--r--llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp1
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp145
-rw-r--r--llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp20
-rw-r--r--llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp2
-rw-r--r--llvm/lib/Transforms/Scalar/IndVarSimplify.cpp85
-rw-r--r--llvm/lib/Transforms/Scalar/LICM.cpp87
-rw-r--r--llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp38
-rw-r--r--llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp20
-rw-r--r--llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp60
-rw-r--r--llvm/lib/Transforms/Utils/BasicBlockUtils.cpp73
-rw-r--r--llvm/lib/Transforms/Utils/ControlFlowUtils.cpp5
-rw-r--r--llvm/lib/Transforms/Utils/FixIrreducible.cpp126
-rw-r--r--llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp19
-rw-r--r--llvm/lib/Transforms/Utils/UnifyLoopExits.cpp77
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorize.cpp25
-rw-r--r--llvm/test/Analysis/DependenceAnalysis/GCD.ll6
-rw-r--r--llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll4
-rw-r--r--llvm/test/Analysis/DependenceAnalysis/compute-absolute-value.ll2
-rw-r--r--llvm/test/Analysis/DependenceAnalysis/gcd-miv-overflow.ll63
-rw-r--r--llvm/test/Analysis/DependenceAnalysis/strong-siv-overflow.ll68
-rw-r--r--llvm/test/Analysis/DependenceAnalysis/symbolic-rdiv-overflow.ll137
-rw-r--r--llvm/test/Analysis/DependenceAnalysis/weak-crossing-siv-overflow.ll125
-rw-r--r--llvm/test/Analysis/DependenceAnalysis/weak-zero-siv-overflow.ll122
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-srl-and.ll42
-rw-r--r--llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll414
-rw-r--r--llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll424
-rw-r--r--llvm/test/CodeGen/AArch64/signbit-test.ll22
-rw-r--r--llvm/test/CodeGen/AArch64/signed-truncation-check.ll434
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll612
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.ll66
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.mir37
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll63
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll67
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.mir524
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir19
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir24
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir8
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir479
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir8
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/sub.ll535
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsic.ll173
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll1
-rw-r--r--llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/llc-pipeline.ll23
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll79
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll70
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll75
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll77
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll48
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll49
-rw-r--r--llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll23
-rw-r--r--llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll198
-rw-r--r--llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/wqm.ll18
-rw-r--r--llvm/test/CodeGen/ARM/strict-fp-func.ll13
-rw-r--r--llvm/test/CodeGen/LoongArch/lasx/ir-instruction/avg.ll307
-rw-r--r--llvm/test/CodeGen/LoongArch/lsx/ir-instruction/avg.ll307
-rw-r--r--llvm/test/CodeGen/PowerPC/combine-sext-and-shl-after-isel.ll100
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-stackmap.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rv64p.ll6
-rw-r--r--llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll70
-rw-r--r--llvm/test/CodeGen/X86/bittest-big-integer.ll6925
-rw-r--r--llvm/test/CodeGen/X86/ldexp-avx512.ll467
-rw-r--r--llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll84
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_vop1.s4
-rw-r--r--llvm/test/Transforms/FixIrreducible/bug45623.ll109
-rw-r--r--llvm/test/Transforms/FixIrreducible/callbr.ll869
-rw-r--r--llvm/test/Transforms/FixIrreducible/nested.ll676
-rw-r--r--llvm/test/Transforms/FixIrreducible/unreachable.ll23
-rw-r--r--llvm/test/Transforms/GVN/assume-equal.ll44
-rw-r--r--llvm/test/Transforms/IndVarSimplify/AMDGPU/addrspace-7-doesnt-crash.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/ARM/code-size.ll22
-rw-r--r--llvm/test/Transforms/IndVarSimplify/ARM/indvar-unroll-imm-cost.ll4
-rw-r--r--llvm/test/Transforms/IndVarSimplify/X86/inner-loop-by-latch-cond.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/exit-count-select.ll14
-rw-r--r--llvm/test/Transforms/IndVarSimplify/finite-exit-comparisons.ll6
-rw-r--r--llvm/test/Transforms/IndVarSimplify/pr116483.ll8
-rw-r--r--llvm/test/Transforms/IndVarSimplify/pr24783.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/pr39673.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/pr63763.ll6
-rw-r--r--llvm/test/Transforms/IndVarSimplify/replace-loop-exit-folds.ll21
-rw-r--r--llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-values-phi.ll8
-rw-r--r--llvm/test/Transforms/IndVarSimplify/scev-expander-preserve-lcssa.ll14
-rw-r--r--llvm/test/Transforms/IndVarSimplify/scev-invalidation.ll4
-rw-r--r--llvm/test/Transforms/IndVarSimplify/sentinel.ll14
-rw-r--r--llvm/test/Transforms/IndVarSimplify/sink-from-preheader.ll32
-rw-r--r--llvm/test/Transforms/IndVarSimplify/sink-trapping.ll19
-rw-r--r--llvm/test/Transforms/IndVarSimplify/zext-nuw.ll2
-rw-r--r--llvm/test/Transforms/LICM/scalar-promote.ll6
-rw-r--r--llvm/test/Transforms/LICM/sink-alloca.ll (renamed from llvm/test/Transforms/IndVarSimplify/sink-alloca.ll)6
-rw-r--r--llvm/test/Transforms/LICM/sink-from-preheader.ll185
-rw-r--r--llvm/test/Transforms/LICM/sink-trapping.ll28
-rw-r--r--llvm/test/Transforms/LoopDeletion/invalidate-scev-after-hoisting.ll2
-rw-r--r--llvm/test/Transforms/LoopDistribute/laa-invalidation.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/AArch64/prefer-all.ll144
-rw-r--r--llvm/test/Transforms/LoopUnroll/followup.ll35
-rw-r--r--llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll2
-rw-r--r--llvm/test/Transforms/MemCpyOpt/stack-move.ll58
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll2
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll4
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll2
-rw-r--r--llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll20
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll6
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll49
-rw-r--r--llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-profile.ll89
-rw-r--r--llvm/test/Transforms/SimpleLoopUnswitch/pr60736.ll11
-rw-r--r--llvm/test/Transforms/SimpleLoopUnswitch/simple-unswitch-profile.ll157
-rw-r--r--llvm/test/Transforms/UnifyLoopExits/basic.ll131
-rw-r--r--llvm/test/Transforms/UnifyLoopExits/integer_guards.ll410
-rw-r--r--llvm/test/Transforms/UnifyLoopExits/nested.ll142
-rw-r--r--llvm/test/Transforms/UnifyLoopExits/restore-ssa.ll236
-rw-r--r--llvm/test/Transforms/UnifyLoopExits/undef-phis.ll68
-rw-r--r--llvm/test/lit.cfg.py15
-rw-r--r--llvm/unittests/ADT/TypeSwitchTest.cpp41
-rw-r--r--llvm/utils/UpdateTestChecks/common.py238
-rw-r--r--llvm/utils/UpdateTestChecks/mir.py362
-rw-r--r--llvm/utils/lit/lit/TestRunner.py28
-rw-r--r--llvm/utils/lit/lit/builtin_commands/_launch_with_limit.py4
-rw-r--r--llvm/utils/lit/tests/Inputs/shtest-readfile/env.txt6
-rw-r--r--llvm/utils/lit/tests/Inputs/shtest-readfile/lit.cfg1
-rw-r--r--llvm/utils/lit/tests/Inputs/shtest-ulimit-nondarwin/ulimit_okay.txt1
-rw-r--r--llvm/utils/lit/tests/Inputs/shtest-ulimit-nondarwin/ulimit_unlimited.txt6
-rw-r--r--llvm/utils/lit/tests/Inputs/shtest-ulimit/print_limits.py2
-rw-r--r--llvm/utils/lit/tests/Inputs/shtest-ulimit/ulimit_okay.txt1
-rw-r--r--llvm/utils/lit/tests/shtest-readfile-external.py2
-rw-r--r--llvm/utils/lit/tests/shtest-readfile.py6
-rw-r--r--llvm/utils/lit/tests/shtest-ulimit-nondarwin.py10
-rw-r--r--llvm/utils/lit/tests/shtest-ulimit.py2
-rwxr-xr-xllvm/utils/update_givaluetracking_test_checks.py3
-rwxr-xr-xllvm/utils/update_mir_test_checks.py121
-rw-r--r--mlir/include/mlir/Dialect/ControlFlow/Transforms/StructuralTypeConversions.h48
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td4
-rw-r--r--mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp64
-rw-r--r--mlir/lib/Dialect/ControlFlow/Transforms/CMakeLists.txt1
-rw-r--r--mlir/lib/Dialect/ControlFlow/Transforms/StructuralTypeConversions.cpp169
-rw-r--r--mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp37
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp6
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp6
-rw-r--r--mlir/lib/Dialect/Tensor/IR/TensorOps.cpp4
-rw-r--r--mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp2
-rw-r--r--mlir/lib/Query/Query.cpp5
-rw-r--r--mlir/lib/Support/Timing.cpp1
-rw-r--r--mlir/test/Conversion/SCFToGPU/parallel_loop.mlir32
-rw-r--r--mlir/test/Dialect/Linalg/canonicalize.mlir2
-rw-r--r--mlir/test/Dialect/Linalg/generalize-named-ops.mlir22
-rw-r--r--mlir/test/Dialect/Linalg/invalid.mlir10
-rw-r--r--mlir/test/Dialect/Linalg/one-shot-bufferize.mlir2
-rw-r--r--mlir/test/Dialect/Linalg/roundtrip.mlir18
-rw-r--r--mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir2
-rw-r--r--mlir/test/Dialect/Tensor/bufferize.mlir2
-rw-r--r--mlir/test/Interfaces/TilingInterface/lower-to-loops-using-interface.mlir6
-rw-r--r--mlir/test/Transforms/test-legalize-type-conversion.mlir22
-rw-r--r--mlir/test/lib/Dialect/Test/CMakeLists.txt1
-rw-r--r--mlir/test/lib/Dialect/Test/TestPatterns.cpp7
-rw-r--r--mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.cpp2
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/BUILD.bazel3
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel1
354 files changed, 16000 insertions, 10104 deletions
diff --git a/clang/docs/AMDGPUSupport.rst b/clang/docs/AMDGPUSupport.rst
index 3eada5f..18e3de8 100644
--- a/clang/docs/AMDGPUSupport.rst
+++ b/clang/docs/AMDGPUSupport.rst
@@ -49,10 +49,6 @@ Predefined Macros
- Defined as 1 if the CU mode is enabled and 0 if the WGP mode is enabled.
* - ``__AMDGCN_UNSAFE_FP_ATOMICS__``
- Defined if unsafe floating-point atomics are allowed.
- * - ``__AMDGCN_WAVEFRONT_SIZE__``
- - Defines the wavefront size. Allowed values are 32 and 64 (deprecated).
- * - ``__AMDGCN_WAVEFRONT_SIZE``
- - Alias to ``__AMDGCN_WAVEFRONT_SIZE__`` (deprecated).
* - ``__HAS_FMAF__``
- Defined if FMAF instruction is available (deprecated).
* - ``__HAS_LDEXPF__``
diff --git a/clang/docs/HIPSupport.rst b/clang/docs/HIPSupport.rst
index ec2af2a..ab9ea11 100644
--- a/clang/docs/HIPSupport.rst
+++ b/clang/docs/HIPSupport.rst
@@ -180,8 +180,7 @@ Predefined Macros
- Alias to ``__HIP_API_PER_THREAD_DEFAULT_STREAM__``. Deprecated.
Note that some architecture specific AMDGPU macros will have default values when
-used from the HIP host compilation. Other :doc:`AMDGPU macros <AMDGPUSupport>`
-like ``__AMDGCN_WAVEFRONT_SIZE__`` (deprecated) will default to 64 for example.
+used from the HIP host compilation.
Compilation Modes
=================
diff --git a/clang/docs/Modules.rst b/clang/docs/Modules.rst
index acbe45e..e45ee9f 100644
--- a/clang/docs/Modules.rst
+++ b/clang/docs/Modules.rst
@@ -421,13 +421,7 @@ As an example, the module map file for the C standard library might look a bit l
.. parsed-literal::
- module std [system] [extern_c] {
- module assert {
- textual header "assert.h"
- header "bits/assert-decls.h"
- export *
- }
-
+ module std [system] {
module complex {
header "complex.h"
export *
@@ -440,7 +434,6 @@ As an example, the module map file for the C standard library might look a bit l
module errno {
header "errno.h"
- header "sys/errno.h"
export *
}
@@ -673,14 +666,14 @@ of checking *use-declaration*\s, and must still be a lexically-valid header
file. In the future, we intend to pre-tokenize such headers and include the
token sequence within the prebuilt module representation.
-A header with the ``exclude`` specifier is excluded from the module. It will not be included when the module is built, nor will it be considered to be part of the module, even if an ``umbrella`` header or directory would otherwise make it part of the module.
+A header with the ``exclude`` specifier is excluded from the module. It will not be included when the module is built, nor will it be considered to be part of the module, even if an ``umbrella`` directory would otherwise make it part of the module.
-**Example:** The C header ``assert.h`` is an excellent candidate for a textual header, because it is meant to be included multiple times (possibly with different ``NDEBUG`` settings). However, declarations within it should typically be split into a separate modular header.
+**Example:** A "X macro" header is an excellent candidate for a textual header, because it is can't be compiled standalone, and by itself does not contain any declarations.
.. parsed-literal::
- module std [system] {
- textual header "assert.h"
+ module MyLib [system] {
+ textual header "xmacros.h"
}
A given header shall not be referenced by more than one *header-declaration*.
diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst
index 61b5bab..10a8d09 100644
--- a/clang/docs/OpenMPSupport.rst
+++ b/clang/docs/OpenMPSupport.rst
@@ -1,662 +1,662 @@
-.. raw:: html
-
- <style type="text/css">
- .none { background-color: #FFCCCC }
- .part { background-color: #FFFF99 }
- .good { background-color: #CCFF99 }
- </style>
-
-.. role:: none
-.. role:: part
-.. role:: good
-
-.. contents::
- :local:
-
-==============
-OpenMP Support
-==============
-
-Clang fully supports OpenMP 4.5, almost all of 5.0 and most of 5.1/2.
-Clang supports offloading to X86_64, AArch64, PPC64[LE], NVIDIA GPUs (all models) and AMD GPUs (all models).
-
-In addition, the LLVM OpenMP runtime `libomp` supports the OpenMP Tools
-Interface (OMPT) on x86, x86_64, AArch64, and PPC64 on Linux, Windows, and macOS.
-OMPT is also supported for NVIDIA and AMD GPUs.
-
-For the list of supported features from OpenMP 5.0 and 5.1
-see `OpenMP implementation details`_ and `OpenMP 51 implementation details`_.
-
-General improvements
-====================
-- New collapse clause scheme to avoid expensive remainder operations.
- Compute loop index variables after collapsing a loop nest via the
- collapse clause by replacing the expensive remainder operation with
- multiplications and additions.
-
-- When using the collapse clause on a loop nest the default behavior
- is to automatically extend the representation of the loop counter to
- 64 bits for the cases where the sizes of the collapsed loops are not
- known at compile time. To prevent this conservative choice and use
- at most 32 bits, compile your program with the
- `-fopenmp-optimistic-collapse`.
-
-
-GPU devices support
-===================
-
-Data-sharing modes
-------------------
-
-Clang supports two data-sharing models for Cuda devices: `Generic` and `Cuda`
-modes. The default mode is `Generic`. `Cuda` mode can give an additional
-performance and can be activated using the `-fopenmp-cuda-mode` flag. In
-`Generic` mode all local variables that can be shared in the parallel regions
-are stored in the global memory. In `Cuda` mode local variables are not shared
-between the threads and it is user responsibility to share the required data
-between the threads in the parallel regions. Often, the optimizer is able to
-reduce the cost of `Generic` mode to the level of `Cuda` mode, but the flag,
-as well as other assumption flags, can be used for tuning.
-
-Features not supported or with limited support for Cuda devices
----------------------------------------------------------------
-
-- Cancellation constructs are not supported.
-
-- Doacross loop nest is not supported.
-
-- User-defined reductions are supported only for trivial types.
-
-- Nested parallelism: inner parallel regions are executed sequentially.
-
-- Debug information for OpenMP target regions is supported, but sometimes it may
- be required to manually specify the address class of the inspected variables.
- In some cases the local variables are actually allocated in the global memory,
- but the debug info may be not aware of it.
-
-
-.. _OpenMP implementation details:
-
-OpenMP 5.0 Implementation Details
-=================================
-
-The following table provides a quick overview over various OpenMP 5.0 features
-and their implementation status. Please post on the
-`Discourse forums (Runtimes - OpenMP category)`_ for more
-information or if you want to help with the
-implementation.
-
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-|Category | Feature | Status | Reviews |
-+==============================+==============================================================+==========================+=======================================================================+
-| loop | support != in the canonical loop form | :good:`done` | D54441 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| loop | #pragma omp loop (directive) | :part:`partial` | D145823 (combined forms) |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| loop | #pragma omp loop bind | :part:`worked on` | D144634 (needs review) |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| loop | collapse imperfectly nested loop | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| loop | collapse non-rectangular nested loop | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| loop | C++ range-base for loop | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| loop | clause: if for SIMD directives | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| loop | inclusive scan (matching C++17 PSTL) | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| memory management | memory allocators | :good:`done` | r341687,r357929 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| memory management | allocate directive and allocate clause | :good:`done` | r355614,r335952 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| OMPD | OMPD interfaces | :good:`done` | https://reviews.llvm.org/D99914 (Supports only HOST(CPU) and Linux |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| OMPT | OMPT interfaces (callback support) | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| thread affinity | thread affinity | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | taskloop reduction | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | task affinity | :part:`not upstream` | https://github.com/jklinkenberg/openmp/tree/task-affinity |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | clause: depend on the taskwait construct | :good:`done` | D113540 (regular codegen only) |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | depend objects and detachable tasks | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | mutexinoutset dependence-type for tasks | :good:`done` | D53380,D57576 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | combined taskloop constructs | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | master taskloop | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | parallel master taskloop | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | master taskloop simd | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | parallel master taskloop simd | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| SIMD | atomic and simd constructs inside SIMD code | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| SIMD | SIMD nontemporal | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | infer target functions from initializers | :part:`worked on` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | infer target variables from initializers | :good:`done` | D146418 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | OMP_TARGET_OFFLOAD environment variable | :good:`done` | D50522 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | support full 'defaultmap' functionality | :good:`done` | D69204 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | device specific functions | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | clause: device_type | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | clause: extended device | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | clause: uses_allocators clause | :good:`done` | https://github.com/llvm/llvm-project/pull/157025 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | clause: in_reduction | :part:`worked on` | r308768 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | omp_get_device_num() | :good:`done` | D54342,D128347 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | structure mapping of references | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | nested target declare | :good:`done` | D51378 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | implicitly map 'this' (this[:1]) | :good:`done` | D55982 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | allow access to the reference count (omp_target_is_present) | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | requires directive | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | clause: unified_shared_memory | :good:`done` | D52625,D52359 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | clause: unified_address | :part:`partial` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | clause: reverse_offload | :part:`partial` | D52780,D155003 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | clause: atomic_default_mem_order | :good:`done` | D53513 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | clause: dynamic_allocators | :part:`unclaimed parts` | D53079 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | user-defined mappers | :good:`done` | D56326,D58638,D58523,D58074,D60972,D59474 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | map array-section with implicit mapper | :good:`done` | https://github.com/llvm/llvm-project/pull/101101 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | mapping lambda expression | :good:`done` | D51107 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | clause: use_device_addr for target data | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | support close modifier on map clause | :good:`done` | D55719,D55892 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | teams construct on the host device | :good:`done` | r371553 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | support non-contiguous array sections for target update | :good:`done` | https://github.com/llvm/llvm-project/pull/144635 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | pointer attachment | :part:`being repaired` | @abhinavgaba (https://github.com/llvm/llvm-project/pull/153683) |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| atomic | hints for the atomic construct | :good:`done` | D51233 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| base language | C11 support | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| base language | C++11/14/17 support | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| base language | lambda support | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | array shaping | :good:`done` | D74144 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | library shutdown (omp_pause_resource[_all]) | :good:`done` | D55078 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | metadirectives | :part:`mostly done` | D91944, https://github.com/llvm/llvm-project/pull/128640 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | conditional modifier for lastprivate clause | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | iterator and multidependences | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | depobj directive and depobj dependency kind | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | user-defined function variants | :good:`done`. | D67294, D64095, D71847, D71830, D109635 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | pointer/reference to pointer based array reductions | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | prevent new type definitions in clauses | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| memory model | memory model update (seq_cst, acq_rel, release, acquire,...) | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-
-
-.. _OpenMP 51 implementation details:
-
-OpenMP 5.1 Implementation Details
-=================================
-
-The following table provides a quick overview over various OpenMP 5.1 features
-and their implementation status.
-Please post on the
-`Discourse forums (Runtimes - OpenMP category)`_ for more
-information or if you want to help with the
-implementation.
-
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-|Category | Feature | Status | Reviews |
-+==============================+==============================================================+==========================+=======================================================================+
-| atomic | 'compare' clause on atomic construct | :good:`done` | D120290, D120007, D118632, D120200, D116261, D118547, D116637 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| atomic | 'fail' clause on atomic construct | :part:`worked on` | D123235 (in progress) |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| base language | C++ attribute specifier syntax | :good:`done` | D105648 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | 'present' map type modifier | :good:`done` | D83061, D83062, D84422 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | 'present' motion modifier | :good:`done` | D84711, D84712 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | 'present' in defaultmap clause | :good:`done` | D92427 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | map clause reordering based on 'present' modifier | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | device-specific environment variables | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | omp_target_is_accessible routine | :good:`done` | https://github.com/llvm/llvm-project/pull/138294 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | omp_get_mapped_ptr routine | :good:`done` | D141545 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | new async target memory copy routines | :good:`done` | D136103 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | thread_limit clause on target construct | :part:`partial` | D141540 (offload), D152054 (host, in progress) |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | has_device_addr clause on target construct | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | iterators in map clause or motion clauses | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | indirect clause on declare target directive | :part:`In Progress` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | allow virtual functions calls for mapped object on device | :part:`partial` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | interop construct | :part:`partial` | parsing/sema done: D98558, D98834, D98815 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| device | assorted routines for querying interoperable properties | :part:`partial` | D106674 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| loop | Loop tiling transformation | :good:`done` | D76342 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| loop | Loop unrolling transformation | :good:`done` | D99459 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| loop | 'reproducible'/'unconstrained' modifiers in 'order' clause | :part:`partial` | D127855 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| memory management | alignment for allocate directive and clause | :good:`done` | D115683 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| memory management | 'allocator' modifier for allocate clause | :good:`done` | https://github.com/llvm/llvm-project/pull/114883 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| memory management | 'align' modifier for allocate clause | :good:`done` | https://github.com/llvm/llvm-project/pull/121814 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| memory management | new memory management routines | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| memory management | changes to omp_alloctrait_key enum | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| memory model | seq_cst clause on flush construct | :good:`done` | https://github.com/llvm/llvm-project/pull/114072 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | 'omp_all_memory' keyword and use in 'depend' clause | :good:`done` | D125828, D126321 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | error directive | :good:`done` | D139166 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | scope construct | :good:`done` | D157933, https://github.com/llvm/llvm-project/pull/109197 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | routines for controlling and querying team regions | :part:`partial` | D95003 (libomp only) |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | changes to ompt_scope_endpoint_t enum | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | omp_display_env routine | :good:`done` | D74956 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | extended OMP_PLACES syntax | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | OMP_NUM_TEAMS and OMP_TEAMS_THREAD_LIMIT env vars | :good:`done` | D138769 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc                         | 'target_device' selector in context specifier                | :part:`worked on`        |                                                                       |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | begin/end declare variant | :good:`done` | D71179 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | dispatch construct and function variant argument adjustment | :part:`worked on` | D99537, D99679 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | assumes directives | :part:`worked on` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | assume directive | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | nothing directive | :good:`done` | D123286 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | masked construct and related combined constructs | :good:`done` | D99995, D100514, PR-121741(parallel_masked_taskloop) |
-| | | | PR-121746(parallel_masked_task_loop_simd),PR-121914(masked_taskloop) |
-| | | | PR-121916(masked_taskloop_simd) |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | default(firstprivate) & default(private) | :good:`done` | D75591 (firstprivate), D125912 (private) |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| other | deprecating master construct | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| OMPT | new barrier types added to ompt_sync_region_t enum | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| OMPT | async data transfers added to ompt_target_data_op_t enum | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| OMPT | new barrier state values added to ompt_state_t enum | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| OMPT | new 'emi' callbacks for external monitoring interfaces | :good:`done` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| OMPT                         | device tracing interface                                     | :part:`in progress`      | jplehr                                                                |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | 'strict' modifier for taskloop construct | :none:`unclaimed` | |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | inoutset in depend clause | :good:`done` | D97085, D118383 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| task | nowait clause on taskwait | :part:`partial` | parsing/sema done: D131830, D141531 |
-+------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-
-
-.. _OpenMP 5.2 implementation details:
-
-OpenMP 5.2 Implementation Details
-=================================
-
-The following table provides a quick overview of various OpenMP 5.2 features
-and their implementation status. Please post on the
-`Discourse forums (Runtimes - OpenMP category)`_ for more
-information or if you want to help with the
-implementation.
-
-
-
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-|Feature | C/C++ Status | Fortran Status | Reviews |
-+=============================================================+===========================+===========================+==========================================================================+
-| omp_in_explicit_task() | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| semantics of explicit_task_var and implicit_task_var | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| ompx sentinel for C/C++ directive extensions | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| ompx prefix for clause extensions | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| if clause on teams construct | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| step modifier added | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| declare mapper: Add iterator modifier on map clause | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| declare mapper: Add iterator modifier on map clause | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| memspace and traits modifiers to uses_allocators            | :none:`unclaimed`         | :none:`unclaimed`         |                                                                          |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Add otherwise clause to metadirectives | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| doacross clause with support for omp_cur_iteration | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| position of interop_type in init clause on interop          | :none:`unclaimed`         | :none:`unclaimed`         |                                                                          |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| implicit map type for target enter/exit data | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| work OMPT type for work-sharing loop constructs | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| allocate and firstprivate on scope directive | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Change loop consistency for order clause | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Add memspace and traits modifiers to uses_allocators | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Keep original base pointer on map w/o matched candidate | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Pure procedure support for certain directives | :none:`N/A` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| ALLOCATE statement support for allocators | :none:`N/A` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| dispatch construct extension to support end directive | :none:`N/A` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-
-
-
-.. _OpenMP 5.2 Deprecations:
-
-OpenMP 5.2 Deprecations
-=======================
-
-
-
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| | C/C++ Status | Fortran Status | Reviews |
-+=============================================================+===========================+===========================+==========================================================================+
-| Linear clause syntax | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| The minus operator | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Map clause modifiers without commas | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| The use of allocate directives with ALLOCATE statement | :good:`N/A` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| uses_allocators list syntax | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| The default clause on metadirectives | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| The delimited form of the declare target directive | :none:`unclaimed` | :good:`N/A` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| The use of the to clause on the declare target directive | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| The syntax of the destroy clause on the depobj construct | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| keyword source and sink as task-dependence modifiers | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| interop types in any position on init clause of interop | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| ompd prefix usage for some ICVs | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-
-.. _OpenMP 6.0 implementation details:
-
-OpenMP 6.0 Implementation Details
-=================================
-
-The following table provides a quick overview of various OpenMP 6.0 features
-and their implementation status. Please post on the
-`Discourse forums (Runtimes - OpenMP category)`_ for more
-information or if you want to help with the
-implementation.
-
-
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-|Feature | C/C++ Status | Fortran Status | Reviews |
-+=============================================================+===========================+===========================+==========================================================================+
-| free-agent threads | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| threadset clause | :part:`in progress` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Recording of task graphs | :part:`in progress` | :part:`in progress` | clang: jtb20, flang: kparzysz |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Parallel inductions | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| init_complete for scan directive | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| loop interchange transformation | :good:`done` | :none:`unclaimed` | Clang (interchange): https://github.com/llvm/llvm-project/pull/93022 |
-| | | | Clang (permutation): https://github.com/llvm/llvm-project/pull/92030 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| loop reverse transformation | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/92916 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| loop stripe transformation | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/119891 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| loop fusion transformation | :part:`in progress` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/139293 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| loop index set splitting transformation | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| loop transformation apply clause | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| loop fuse transformation | :good:`done` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| workdistribute construct | | :none:`in progress` | @skc7, @mjklemm |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| task_iteration | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| memscope clause for atomic and flush | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| transparent clause (hull tasks) | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| rule-based compound directives | :part:`In Progress` | :part:`In Progress` | kparzysz |
-| | | | Testing for Fortran missing |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| C23, C++23 | :none:`unclaimed` | | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Fortran 2023 | | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| decl attribute for declarative directives | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| C attribute syntax | :none:`unclaimed` | | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| pure directives in DO CONCURRENT | | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Optional argument for all clauses                           | :part:`partial`           | :part:`In Progress`       | Parse/Sema (nowait): https://github.com/llvm/llvm-project/pull/159628    |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Function references for locator list items | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| All clauses accept directive name modifier | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Extensions to depobj construct | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Extensions to atomic construct | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Private reductions | :good:`mostly` | :none:`unclaimed` | Parse/Sema:https://github.com/llvm/llvm-project/pull/129938 |
-| | | | Codegen: https://github.com/llvm/llvm-project/pull/134709 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Self maps | :part:`partial` | :none:`unclaimed` | parsing/sema done: https://github.com/llvm/llvm-project/pull/129888 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Release map type for declare mapper | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Extensions to interop construct | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| no_openmp_constructs | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/125933 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| safe_sync and progress with identifier and API | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| OpenMP directives in concurrent loop regions | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/125621 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| atomics constructs on concurrent loop regions | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/125621 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Loop construct with DO CONCURRENT | | :part:`In Progress` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| device_type clause for target construct | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| nowait for ancestor target directives | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| New API for devices' num_teams/thread_limit | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Host and device environment variables | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| num_threads ICV and clause accepts list | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Numeric names for environment variables | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Increment between places for OMP_PLACES | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| OMP_AVAILABLE_DEVICES envirable | :none:`unclaimed` | :none:`unclaimed` | (should wait for "Traits for default device envirable" being done) |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Traits for default device envirable | :part:`in progress` | :none:`unclaimed` | ro-i |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Optionally omit array length expression | :good:`done` | :none:`unclaimed` | (Parse) https://github.com/llvm/llvm-project/pull/148048, |
-| | | | (Sema) https://github.com/llvm/llvm-project/pull/152786 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Canonical loop sequences | :part:`in progress` | :part:`in progress` | Clang: https://github.com/llvm/llvm-project/pull/139293 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Clarifications to Fortran map semantics | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| default clause at target construct | :part:`In Progress` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| ref count update use_device_{ptr, addr} | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Clarifications to implicit reductions | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| ref modifier for map clauses | :part:`In Progress` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| map-type modifiers in arbitrary position | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/90499 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Lift nesting restriction on concurrent loop | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/125621 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| priority clause for target constructs | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| changes to target_data construct | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Non-const do_not_sync for nowait/nogroup | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| need_device_addr modifier for adjust_args clause | :part:`partial` | :none:`unclaimed` | Parsing/Sema: https://github.com/llvm/llvm-project/pull/143442 |
-| | | | https://github.com/llvm/llvm-project/pull/149586 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Prescriptive num_threads | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/160659 |
-| | | | https://github.com/llvm/llvm-project/pull/146403 |
-| | | | https://github.com/llvm/llvm-project/pull/146404 |
-| | | | https://github.com/llvm/llvm-project/pull/146405 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Message and severity clauses | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/146093 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Local clause on declare target | :part:`In Progress` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| groupprivate directive | :part:`In Progress` | :part:`partial` | Flang: kparzysz, mjklemm |
-| | | | |
-| | | | Flang parser: https://github.com/llvm/llvm-project/pull/153807 |
-| | | | Flang sema: https://github.com/llvm/llvm-project/pull/154779 |
-| | | | Clang parse/sema: https://github.com/llvm/llvm-project/pull/158134 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| variable-category on default clause | :good:`done` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| Changes to omp_target_is_accessible | :part:`In Progress` | :part:`In Progress` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| defaultmap implicit-behavior 'storage' | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/158336 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| defaultmap implicit-behavior 'private' | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/158712 |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-
-.. _OpenMP 6.1 implementation details:
-
-OpenMP 6.1 Implementation Details (Experimental)
-================================================
-
-The following table provides a quick overview over various OpenMP 6.1 features
-and their implementation status. Since OpenMP 6.1 has not yet been released, the
-following features are experimental and are subject to change at any time.
-Please post on the `Discourse forums (Runtimes - OpenMP category)`_ for more
-information or if you want to help with the
-implementation.
-
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-|Feature | C/C++ Status | Fortran Status | Reviews |
-+=============================================================+===========================+===========================+==========================================================================+
-| dyn_groupprivate clause | :part:`In Progress` | :part:`In Progress` | C/C++: kevinsala (https://github.com/llvm/llvm-project/pull/152651 |
-| | | | https://github.com/llvm/llvm-project/pull/152830 |
-| | | | https://github.com/llvm/llvm-project/pull/152831) |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| loop flatten transformation | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| loop grid/tile modifiers for sizes clause | :none:`unclaimed` | :none:`unclaimed` | |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-| attach map-type modifier | :part:`In Progress` | :none:`unclaimed` | C/C++: @abhinavgaba; |
-| | | | RT: @abhinavgaba (https://github.com/llvm/llvm-project/pull/149036, |
-| | | | https://github.com/llvm/llvm-project/pull/158370) |
-+-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
-
-
-OpenMP Extensions
-=================
-
-The following table provides a quick overview over various OpenMP
-extensions and their implementation status. These extensions are not
-currently defined by any standard, so links to associated LLVM
-documentation are provided. As these extensions mature, they will be
-considered for standardization. Please post on the
-`Discourse forums (Runtimes - OpenMP category)`_ to provide feedback.
-
-+------------------------------+-----------------------------------------------------------------------------------+--------------------------+--------------------------------------------------------+
-|Category | Feature | Status | Reviews |
-+==============================+===================================================================================+==========================+========================================================+
-| atomic extension | `'atomic' strictly nested within 'teams' | :good:`prototyped` | D126323 |
-| | <https://openmp.llvm.org/docs/openacc/OpenMPExtensions.html#atomicWithinTeams>`_ | | |
-+------------------------------+-----------------------------------------------------------------------------------+--------------------------+--------------------------------------------------------+
-| device extension | `'ompx_hold' map type modifier | :good:`prototyped` | D106509, D106510 |
-| | <https://openmp.llvm.org/docs/openacc/OpenMPExtensions.html#ompx-hold>`_ | | |
-+------------------------------+-----------------------------------------------------------------------------------+--------------------------+--------------------------------------------------------+
-| device extension | `'ompx_bare' clause on 'target teams' construct | :good:`prototyped` | #66844, #70612 |
-| | <https://www.osti.gov/servlets/purl/2205717>`_ | | |
-+------------------------------+-----------------------------------------------------------------------------------+--------------------------+--------------------------------------------------------+
-| device extension | Multi-dim 'num_teams' and 'thread_limit' clause on 'target teams ompx_bare' | :good:`partial` | #99732, #101407, #102715 |
-| | construct | | |
-+------------------------------+-----------------------------------------------------------------------------------+--------------------------+--------------------------------------------------------+
-
-.. _Discourse forums (Runtimes - OpenMP category): https://discourse.llvm.org/c/runtimes/openmp/35
+.. raw:: html
+
+ <style type="text/css">
+ .none { background-color: #FFCCCC }
+ .part { background-color: #FFFF99 }
+ .good { background-color: #CCFF99 }
+ </style>
+
+.. role:: none
+.. role:: part
+.. role:: good
+
+.. contents::
+ :local:
+
+==============
+OpenMP Support
+==============
+
+Clang fully supports OpenMP 4.5, almost all of 5.0, and most of 5.1 and 5.2.
+Clang supports offloading to X86_64, AArch64, PPC64[LE], NVIDIA GPUs (all models) and AMD GPUs (all models).
+
+In addition, the LLVM OpenMP runtime `libomp` supports the OpenMP Tools
+Interface (OMPT) on x86, x86_64, AArch64, and PPC64 on Linux, Windows, and macOS.
+OMPT is also supported for NVIDIA and AMD GPUs.
+
+For the list of supported features from OpenMP 5.0 and 5.1
+see `OpenMP implementation details`_ and `OpenMP 51 implementation details`_.
+
+General improvements
+====================
+- New collapse clause scheme to avoid expensive remainder operations.
+ Compute loop index variables after collapsing a loop nest via the
+ collapse clause by replacing the expensive remainder operation with
+ multiplications and additions.
+
+- When using the collapse clause on a loop nest the default behavior
+ is to automatically extend the representation of the loop counter to
+ 64 bits for the cases where the sizes of the collapsed loops are not
+ known at compile time. To prevent this conservative choice and use
+ at most 32 bits, compile your program with the
+  `-fopenmp-optimistic-collapse` flag.
+
+
+GPU devices support
+===================
+
+Data-sharing modes
+------------------
+
+Clang supports two data-sharing models for Cuda devices: `Generic` and `Cuda`
+modes. The default mode is `Generic`. `Cuda` mode can give additional
+performance and can be activated using the `-fopenmp-cuda-mode` flag. In
+`Generic` mode all local variables that can be shared in the parallel regions
+are stored in the global memory. In `Cuda` mode local variables are not shared
+between the threads and it is the user's responsibility to share the required
+data between the threads in the parallel regions. Often, the optimizer is able
+to reduce the cost of `Generic` mode to the level of `Cuda` mode, but the flag,
+as well as other assumption flags, can be used for tuning.
+
+Features not supported or with limited support for Cuda devices
+---------------------------------------------------------------
+
+- Cancellation constructs are not supported.
+
+- Doacross loop nest is not supported.
+
+- User-defined reductions are supported only for trivial types.
+
+- Nested parallelism: inner parallel regions are executed sequentially.
+
+- Debug information for OpenMP target regions is supported, but sometimes it may
+  be required to manually specify the address class of the inspected variables.
+  In some cases the local variables are actually allocated in the global memory,
+  but the debug info may not be aware of it.
+
+
+.. _OpenMP implementation details:
+
+OpenMP 5.0 Implementation Details
+=================================
+
+The following table provides a quick overview over various OpenMP 5.0 features
+and their implementation status. Please post on the
+`Discourse forums (Runtimes - OpenMP category)`_ for more
+information or if you want to help with the
+implementation.
+
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+|Category | Feature | Status | Reviews |
++==============================+==============================================================+==========================+=======================================================================+
+| loop | support != in the canonical loop form | :good:`done` | D54441 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| loop | #pragma omp loop (directive) | :part:`partial` | D145823 (combined forms) |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| loop | #pragma omp loop bind | :part:`worked on` | D144634 (needs review) |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| loop | collapse imperfectly nested loop | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| loop | collapse non-rectangular nested loop | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| loop                         | C++ range-based for loop                                     | :good:`done`             |                                                                       |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| loop | clause: if for SIMD directives | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| loop | inclusive scan (matching C++17 PSTL) | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| memory management | memory allocators | :good:`done` | r341687,r357929 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| memory management | allocate directive and allocate clause | :good:`done` | r355614,r335952 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| OMPD                         | OMPD interfaces                                              | :good:`done`             | https://reviews.llvm.org/D99914 (Supports only HOST(CPU) and Linux)   |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| OMPT | OMPT interfaces (callback support) | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| thread affinity | thread affinity | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | taskloop reduction | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | task affinity | :part:`not upstream` | https://github.com/jklinkenberg/openmp/tree/task-affinity |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | clause: depend on the taskwait construct | :good:`done` | D113540 (regular codegen only) |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | depend objects and detachable tasks | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | mutexinoutset dependence-type for tasks | :good:`done` | D53380,D57576 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | combined taskloop constructs | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | master taskloop | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | parallel master taskloop | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | master taskloop simd | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | parallel master taskloop simd | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| SIMD | atomic and simd constructs inside SIMD code | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| SIMD | SIMD nontemporal | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | infer target functions from initializers | :part:`worked on` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | infer target variables from initializers | :good:`done` | D146418 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | OMP_TARGET_OFFLOAD environment variable | :good:`done` | D50522 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | support full 'defaultmap' functionality | :good:`done` | D69204 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | device specific functions | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | clause: device_type | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | clause: extended device | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | clause: uses_allocators clause | :good:`done` | https://github.com/llvm/llvm-project/pull/157025 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | clause: in_reduction | :part:`worked on` | r308768 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | omp_get_device_num() | :good:`done` | D54342,D128347 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | structure mapping of references | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | nested target declare | :good:`done` | D51378 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | implicitly map 'this' (this[:1]) | :good:`done` | D55982 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | allow access to the reference count (omp_target_is_present) | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | requires directive | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | clause: unified_shared_memory | :good:`done` | D52625,D52359 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | clause: unified_address | :part:`partial` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | clause: reverse_offload | :part:`partial` | D52780,D155003 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | clause: atomic_default_mem_order | :good:`done` | D53513 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | clause: dynamic_allocators | :part:`unclaimed parts` | D53079 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | user-defined mappers | :good:`done` | D56326,D58638,D58523,D58074,D60972,D59474 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | map array-section with implicit mapper | :good:`done` | https://github.com/llvm/llvm-project/pull/101101 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | mapping lambda expression | :good:`done` | D51107 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | clause: use_device_addr for target data | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | support close modifier on map clause | :good:`done` | D55719,D55892 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | teams construct on the host device | :good:`done` | r371553 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | support non-contiguous array sections for target update | :good:`done` | https://github.com/llvm/llvm-project/pull/144635 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | pointer attachment | :part:`being repaired` | @abhinavgaba (https://github.com/llvm/llvm-project/pull/153683) |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| atomic | hints for the atomic construct | :good:`done` | D51233 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| base language | C11 support | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| base language | C++11/14/17 support | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| base language | lambda support | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | array shaping | :good:`done` | D74144 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | library shutdown (omp_pause_resource[_all]) | :good:`done` | D55078 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | metadirectives | :part:`mostly done` | D91944, https://github.com/llvm/llvm-project/pull/128640 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | conditional modifier for lastprivate clause | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | iterator and multidependences | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | depobj directive and depobj dependency kind | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc                         | user-defined function variants                               | :good:`done`             | D67294, D64095, D71847, D71830, D109635                               |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | pointer/reference to pointer based array reductions | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | prevent new type definitions in clauses | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| memory model | memory model update (seq_cst, acq_rel, release, acquire,...) | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+
+
+.. _OpenMP 51 implementation details:
+
+OpenMP 5.1 Implementation Details
+=================================
+
+The following table provides a quick overview over various OpenMP 5.1 features
+and their implementation status.
+Please post on the
+`Discourse forums (Runtimes - OpenMP category)`_ for more
+information or if you want to help with the
+implementation.
+
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+|Category | Feature | Status | Reviews |
++==============================+==============================================================+==========================+=======================================================================+
+| atomic | 'compare' clause on atomic construct | :good:`done` | D120290, D120007, D118632, D120200, D116261, D118547, D116637 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| atomic | 'fail' clause on atomic construct | :part:`worked on` | D123235 (in progress) |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| base language | C++ attribute specifier syntax | :good:`done` | D105648 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | 'present' map type modifier | :good:`done` | D83061, D83062, D84422 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | 'present' motion modifier | :good:`done` | D84711, D84712 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | 'present' in defaultmap clause | :good:`done` | D92427 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | map clause reordering based on 'present' modifier | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | device-specific environment variables | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | omp_target_is_accessible routine | :good:`done` | https://github.com/llvm/llvm-project/pull/138294 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | omp_get_mapped_ptr routine | :good:`done` | D141545 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | new async target memory copy routines | :good:`done` | D136103 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | thread_limit clause on target construct | :part:`partial` | D141540 (offload), D152054 (host, in progress) |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | has_device_addr clause on target construct | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | iterators in map clause or motion clauses | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | indirect clause on declare target directive | :part:`In Progress` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | allow virtual functions calls for mapped object on device | :part:`partial` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | interop construct | :part:`partial` | parsing/sema done: D98558, D98834, D98815 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| device | assorted routines for querying interoperable properties | :part:`partial` | D106674 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| loop | Loop tiling transformation | :good:`done` | D76342 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| loop | Loop unrolling transformation | :good:`done` | D99459 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| loop | 'reproducible'/'unconstrained' modifiers in 'order' clause | :part:`partial` | D127855 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| memory management | alignment for allocate directive and clause | :good:`done` | D115683 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| memory management | 'allocator' modifier for allocate clause | :good:`done` | https://github.com/llvm/llvm-project/pull/114883 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| memory management | 'align' modifier for allocate clause | :good:`done` | https://github.com/llvm/llvm-project/pull/121814 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| memory management | new memory management routines | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| memory management | changes to omp_alloctrait_key enum | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| memory model | seq_cst clause on flush construct | :good:`done` | https://github.com/llvm/llvm-project/pull/114072 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | 'omp_all_memory' keyword and use in 'depend' clause | :good:`done` | D125828, D126321 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | error directive | :good:`done` | D139166 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | scope construct | :good:`done` | D157933, https://github.com/llvm/llvm-project/pull/109197 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | routines for controlling and querying team regions | :part:`partial` | D95003 (libomp only) |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | changes to ompt_scope_endpoint_t enum | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | omp_display_env routine | :good:`done` | D74956 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | extended OMP_PLACES syntax | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | OMP_NUM_TEAMS and OMP_TEAMS_THREAD_LIMIT env vars | :good:`done` | D138769 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc                         | 'target_device' selector in context specifier               | :part:`worked on`        |                                                                       |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | begin/end declare variant | :good:`done` | D71179 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | dispatch construct and function variant argument adjustment | :part:`worked on` | D99537, D99679 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | assumes directives | :part:`worked on` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | assume directive | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | nothing directive | :good:`done` | D123286 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | masked construct and related combined constructs | :good:`done` | D99995, D100514, PR-121741(parallel_masked_taskloop) |
+| | | | PR-121746(parallel_masked_task_loop_simd),PR-121914(masked_taskloop) |
+| | | | PR-121916(masked_taskloop_simd) |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| misc | default(firstprivate) & default(private) | :good:`done` | D75591 (firstprivate), D125912 (private) |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| other | deprecating master construct | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| OMPT | new barrier types added to ompt_sync_region_t enum | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| OMPT | async data transfers added to ompt_target_data_op_t enum | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| OMPT | new barrier state values added to ompt_state_t enum | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| OMPT | new 'emi' callbacks for external monitoring interfaces | :good:`done` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| OMPT                         | device tracing interface                                     | :part:`in progress`      | jplehr                                                                |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | 'strict' modifier for taskloop construct | :none:`unclaimed` | |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | inoutset in depend clause | :good:`done` | D97085, D118383 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+| task | nowait clause on taskwait | :part:`partial` | parsing/sema done: D131830, D141531 |
++------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
+
+
+.. _OpenMP 5.2 implementation details:
+
+OpenMP 5.2 Implementation Details
+=================================
+
+The following table provides a quick overview of various OpenMP 5.2 features
+and their implementation status. Please post on the
+`Discourse forums (Runtimes - OpenMP category)`_ for more
+information or if you want to help with the
+implementation.
+
+
+
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+|Feature | C/C++ Status | Fortran Status | Reviews |
++=============================================================+===========================+===========================+==========================================================================+
+| omp_in_explicit_task() | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| semantics of explicit_task_var and implicit_task_var | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| ompx sentinel for C/C++ directive extensions | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| ompx prefix for clause extensions | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| if clause on teams construct | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| step modifier added | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| declare mapper: Add iterator modifier on map clause | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Add otherwise clause to metadirectives | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| doacross clause with support for omp_cur_iteration | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| position of interop_type in init clause on interop          | :none:`unclaimed`         | :none:`unclaimed`         |                                                                          |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| implicit map type for target enter/exit data | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| work OMPT type for work-sharing loop constructs | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| allocate and firstprivate on scope directive | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Change loop consistency for order clause | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Add memspace and traits modifiers to uses_allocators | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Keep original base pointer on map w/o matched candidate | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Pure procedure support for certain directives | :none:`N/A` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| ALLOCATE statement support for allocators | :none:`N/A` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| dispatch construct extension to support end directive | :none:`N/A` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+
+
+
+.. _OpenMP 5.2 Deprecations:
+
+OpenMP 5.2 Deprecations
+=======================
+
+
+
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| | C/C++ Status | Fortran Status | Reviews |
++=============================================================+===========================+===========================+==========================================================================+
+| Linear clause syntax | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| The minus operator | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Map clause modifiers without commas | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| The use of allocate directives with ALLOCATE statement | :good:`N/A` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| uses_allocators list syntax | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| The default clause on metadirectives | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| The delimited form of the declare target directive | :none:`unclaimed` | :good:`N/A` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| The use of the to clause on the declare target directive | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| The syntax of the destroy clause on the depobj construct | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| keyword source and sink as task-dependence modifiers | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| interop types in any position on init clause of interop | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| ompd prefix usage for some ICVs | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+
+.. _OpenMP 6.0 implementation details:
+
+OpenMP 6.0 Implementation Details
+=================================
+
+The following table provides a quick overview of various OpenMP 6.0 features
+and their implementation status. Please post on the
+`Discourse forums (Runtimes - OpenMP category)`_ for more
+information or if you want to help with the
+implementation.
+
+
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+|Feature | C/C++ Status | Fortran Status | Reviews |
++=============================================================+===========================+===========================+==========================================================================+
+| free-agent threads | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| threadset clause                                            | :part:`partial`           | :none:`unclaimed`         | Parse/Sema/Codegen : https://github.com/llvm/llvm-project/pull/135807    |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Recording of task graphs | :part:`in progress` | :part:`in progress` | clang: jtb20, flang: kparzysz |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Parallel inductions | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| init_complete for scan directive | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| loop interchange transformation | :good:`done` | :none:`unclaimed` | Clang (interchange): https://github.com/llvm/llvm-project/pull/93022 |
+| | | | Clang (permutation): https://github.com/llvm/llvm-project/pull/92030 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| loop reverse transformation | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/92916 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| loop stripe transformation | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/119891 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| loop fusion transformation | :part:`in progress` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/139293 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| loop index set splitting transformation | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| loop transformation apply clause | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| loop fuse transformation | :good:`done` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| workdistribute construct                                    |                           | :part:`in progress`       | @skc7, @mjklemm                                                          |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| task_iteration | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| memscope clause for atomic and flush | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| transparent clause (hull tasks) | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| rule-based compound directives | :part:`In Progress` | :part:`In Progress` | kparzysz |
+| | | | Testing for Fortran missing |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| C23, C++23 | :none:`unclaimed` | | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Fortran 2023 | | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| decl attribute for declarative directives | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| C attribute syntax | :none:`unclaimed` | | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| pure directives in DO CONCURRENT | | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Optional argument for all clauses                           | :part:`partial`           | :part:`In Progress`       | Parse/Sema (nowait): https://github.com/llvm/llvm-project/pull/159628    |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Function references for locator list items | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| All clauses accept directive name modifier | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Extensions to depobj construct | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Extensions to atomic construct | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Private reductions                                          | :good:`mostly`            | :none:`unclaimed`         | Parse/Sema: https://github.com/llvm/llvm-project/pull/129938             |
+| | | | Codegen: https://github.com/llvm/llvm-project/pull/134709 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Self maps | :part:`partial` | :none:`unclaimed` | parsing/sema done: https://github.com/llvm/llvm-project/pull/129888 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Release map type for declare mapper | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Extensions to interop construct | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| no_openmp_constructs | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/125933 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| safe_sync and progress with identifier and API | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| OpenMP directives in concurrent loop regions | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/125621 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| atomics constructs on concurrent loop regions | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/125621 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Loop construct with DO CONCURRENT | | :part:`In Progress` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| device_type clause for target construct | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| nowait for ancestor target directives | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| New API for devices' num_teams/thread_limit | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Host and device environment variables | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| num_threads ICV and clause accepts list | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Numeric names for environment variables | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Increment between places for OMP_PLACES | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| OMP_AVAILABLE_DEVICES envirable | :none:`unclaimed` | :none:`unclaimed` | (should wait for "Traits for default device envirable" being done) |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Traits for default device envirable | :part:`in progress` | :none:`unclaimed` | ro-i |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Optionally omit array length expression | :good:`done` | :none:`unclaimed` | (Parse) https://github.com/llvm/llvm-project/pull/148048, |
+| | | | (Sema) https://github.com/llvm/llvm-project/pull/152786 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Canonical loop sequences | :part:`in progress` | :part:`in progress` | Clang: https://github.com/llvm/llvm-project/pull/139293 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Clarifications to Fortran map semantics | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| default clause at target construct | :part:`In Progress` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| ref count update use_device_{ptr, addr} | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Clarifications to implicit reductions | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| ref modifier for map clauses | :part:`In Progress` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| map-type modifiers in arbitrary position | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/90499 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Lift nesting restriction on concurrent loop | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/125621 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| priority clause for target constructs | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| changes to target_data construct | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Non-const do_not_sync for nowait/nogroup | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| need_device_addr modifier for adjust_args clause | :part:`partial` | :none:`unclaimed` | Parsing/Sema: https://github.com/llvm/llvm-project/pull/143442 |
+| | | | https://github.com/llvm/llvm-project/pull/149586 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Prescriptive num_threads | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/160659 |
+| | | | https://github.com/llvm/llvm-project/pull/146403 |
+| | | | https://github.com/llvm/llvm-project/pull/146404 |
+| | | | https://github.com/llvm/llvm-project/pull/146405 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Message and severity clauses | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/146093 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Local clause on declare target | :part:`In Progress` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| groupprivate directive | :part:`In Progress` | :part:`partial` | Flang: kparzysz, mjklemm |
+| | | | |
+| | | | Flang parser: https://github.com/llvm/llvm-project/pull/153807 |
+| | | | Flang sema: https://github.com/llvm/llvm-project/pull/154779 |
+| | | | Clang parse/sema: https://github.com/llvm/llvm-project/pull/158134 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| variable-category on default clause | :good:`done` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| Changes to omp_target_is_accessible | :part:`In Progress` | :part:`In Progress` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| defaultmap implicit-behavior 'storage' | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/158336 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| defaultmap implicit-behavior 'private' | :good:`done` | :none:`unclaimed` | https://github.com/llvm/llvm-project/pull/158712 |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+
+.. _OpenMP 6.1 implementation details:
+
+OpenMP 6.1 Implementation Details (Experimental)
+================================================
+
+The following table provides a quick overview over various OpenMP 6.1 features
+and their implementation status. Since OpenMP 6.1 has not yet been released, the
+following features are experimental and are subject to change at any time.
+Please post on the `Discourse forums (Runtimes - OpenMP category)`_ for more
+information or if you want to help with the
+implementation.
+
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+|Feature | C/C++ Status | Fortran Status | Reviews |
++=============================================================+===========================+===========================+==========================================================================+
+| dyn_groupprivate clause | :part:`In Progress` | :part:`In Progress` | C/C++: kevinsala (https://github.com/llvm/llvm-project/pull/152651 |
+| | | | https://github.com/llvm/llvm-project/pull/152830 |
+| | | | https://github.com/llvm/llvm-project/pull/152831) |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| loop flatten transformation | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| loop grid/tile modifiers for sizes clause | :none:`unclaimed` | :none:`unclaimed` | |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+| attach map-type modifier | :part:`In Progress` | :none:`unclaimed` | C/C++: @abhinavgaba; |
+| | | | RT: @abhinavgaba (https://github.com/llvm/llvm-project/pull/149036, |
+| | | | https://github.com/llvm/llvm-project/pull/158370) |
++-------------------------------------------------------------+---------------------------+---------------------------+--------------------------------------------------------------------------+
+
+
+OpenMP Extensions
+=================
+
+The following table provides a quick overview over various OpenMP
+extensions and their implementation status. These extensions are not
+currently defined by any standard, so links to associated LLVM
+documentation are provided. As these extensions mature, they will be
+considered for standardization. Please post on the
+`Discourse forums (Runtimes - OpenMP category)`_ to provide feedback.
+
++------------------------------+-----------------------------------------------------------------------------------+--------------------------+--------------------------------------------------------+
+|Category | Feature | Status | Reviews |
++==============================+===================================================================================+==========================+========================================================+
+| atomic extension | `'atomic' strictly nested within 'teams' | :good:`prototyped` | D126323 |
+| | <https://openmp.llvm.org/docs/openacc/OpenMPExtensions.html#atomicWithinTeams>`_ | | |
++------------------------------+-----------------------------------------------------------------------------------+--------------------------+--------------------------------------------------------+
+| device extension | `'ompx_hold' map type modifier | :good:`prototyped` | D106509, D106510 |
+| | <https://openmp.llvm.org/docs/openacc/OpenMPExtensions.html#ompx-hold>`_ | | |
++------------------------------+-----------------------------------------------------------------------------------+--------------------------+--------------------------------------------------------+
+| device extension | `'ompx_bare' clause on 'target teams' construct | :good:`prototyped` | #66844, #70612 |
+| | <https://www.osti.gov/servlets/purl/2205717>`_ | | |
++------------------------------+-----------------------------------------------------------------------------------+--------------------------+--------------------------------------------------------+
+| device extension             | Multi-dim 'num_teams' and 'thread_limit' clause on 'target teams ompx_bare'       | :part:`partial`          | #99732, #101407, #102715                               |
+| | construct | | |
++------------------------------+-----------------------------------------------------------------------------------+--------------------------+--------------------------------------------------------+
+
+.. _Discourse forums (Runtimes - OpenMP category): https://discourse.llvm.org/c/runtimes/openmp/35
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index add1582..8435f36 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -661,6 +661,7 @@ OpenMP Support
modifier in the ``adjust_args`` clause.
- Allow array length to be omitted in array section subscript expression.
- Fixed non-contiguous strided update in the ``omp target update`` directive with the ``from`` clause.
+- Added support for the ``threadset`` clause in ``task`` and ``taskloop`` directives.
- Properly handle array section/assumed-size array privatization in C/C++.
- Added support to handle new syntax of the ``uses_allocators`` clause.
- Added support for ``variable-category`` modifier in ``default clause``.
diff --git a/clang/include/clang/AST/OpenMPClause.h b/clang/include/clang/AST/OpenMPClause.h
index bc791e4..4f50748 100644
--- a/clang/include/clang/AST/OpenMPClause.h
+++ b/clang/include/clang/AST/OpenMPClause.h
@@ -1424,6 +1424,86 @@ public:
}
};
+/// This represents 'threadset' clause in the '#pragma omp task ...' directive.
+///
+/// \code
+/// #pragma omp task threadset(omp_pool)
+/// \endcode
+/// In this example directive '#pragma omp task' has simple 'threadset'
+/// clause with kind 'omp_pool'.
+class OMPThreadsetClause final : public OMPClause {
+ friend class OMPClauseReader;
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// A kind of the 'threadset' clause.
+ OpenMPThreadsetKind Kind = OMPC_THREADSET_unknown;
+
+ /// Start location of the kind in source code.
+ SourceLocation KindLoc;
+
+  /// Set kind of the clause.
+ ///
+ /// \param K Argument of clause.
+ void setThreadsetKind(OpenMPThreadsetKind K) { Kind = K; }
+
+ /// Set argument location.
+ ///
+ /// \param KLoc Argument location.
+ void setThreadsetKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
+
+public:
+ /// Build 'threadset' clause with argument \a A ('omp_team' or 'omp_pool').
+ ///
+ /// \param A Argument of the clause ('omp_team' or 'omp_pool').
+ /// \param ALoc Starting location of the argument.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPThreadsetClause(OpenMPThreadsetKind A, SourceLocation ALoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_threadset, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), Kind(A), KindLoc(ALoc) {}
+
+ /// Build an empty clause.
+ OMPThreadsetClause()
+ : OMPClause(llvm::omp::OMPC_threadset, SourceLocation(),
+ SourceLocation()) {}
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns kind of the clause.
+ OpenMPThreadsetKind getThreadsetKind() const { return Kind; }
+
+ /// Returns location of clause kind.
+ SourceLocation getThreadsetKindLoc() const { return KindLoc; }
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_threadset;
+ }
+};
+
/// This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h
index 32b2b6b..8cb0a65 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -3524,6 +3524,12 @@ bool RecursiveASTVisitor<Derived>::VisitOMPDefaultClause(OMPDefaultClause *) {
}
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPThreadsetClause(
+ OMPThreadsetClause *) {
+ return true;
+}
+
+template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPProcBindClause(OMPProcBindClause *) {
return true;
}
diff --git a/clang/include/clang/Basic/Builtins.def b/clang/include/clang/Basic/Builtins.def
index b856ad1..3a5b72e 100644
--- a/clang/include/clang/Basic/Builtins.def
+++ b/clang/include/clang/Basic/Builtins.def
@@ -43,6 +43,7 @@
// SJ -> sigjmp_buf
// K -> ucontext_t
// p -> pid_t
+// e -> _Float16 for HIP/C++ and __fp16 for OpenCL
// . -> "...". This may only occur at the end of the function list.
//
// Types may be prefixed with the following modifiers:
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index f265d82..36cb527 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -967,6 +967,47 @@ TARGET_BUILTIN(__builtin_amdgcn_image_sample_3d_v4f32_f32, "V4fifffQtV4ibii", "n
TARGET_BUILTIN(__builtin_amdgcn_image_sample_3d_v4f16_f32, "V4hifffQtV4ibii", "nc", "image-insts")
TARGET_BUILTIN(__builtin_amdgcn_image_sample_cube_v4f32_f32, "V4fifffQtV4ibii", "nc", "image-insts")
TARGET_BUILTIN(__builtin_amdgcn_image_sample_cube_v4f16_f32, "V4hifffQtV4ibii", "nc", "image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_1d_v4f32_f32, "V4fifQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_1d_v4f16_f32, "V4eifQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_1darray_v4f32_f32, "V4fiffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_1darray_v4f16_f32, "V4eiffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_2d_f32_f32, "fiffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_2d_v4f32_f32, "V4fiffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_2d_v4f16_f32, "V4eiffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_2darray_f32_f32, "fifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_2darray_v4f32_f32, "V4fifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_2darray_v4f16_f32, "V4eifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_3d_v4f32_f32, "V4fifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_3d_v4f16_f32, "V4eifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_cube_v4f32_f32, "V4fifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_lz_cube_v4f16_f32, "V4eifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_1d_v4f32_f32, "V4fiffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_1d_v4f16_f32, "V4eiffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_1darray_v4f32_f32, "V4fifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_1darray_v4f16_f32, "V4eifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_2d_f32_f32, "fifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_2d_v4f32_f32, "V4fifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_2d_v4f16_f32, "V4eifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_2darray_f32_f32, "fiffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_2darray_v4f32_f32, "V4fiffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_2darray_v4f16_f32, "V4eiffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_3d_v4f32_f32, "V4fiffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_3d_v4f16_f32, "V4eiffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_cube_v4f32_f32, "V4fiffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_l_cube_v4f16_f32, "V4eiffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_d_1d_v4f32_f32, "V4fifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_d_1d_v4f16_f32, "V4eifffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_d_1darray_v4f32_f32, "V4fiffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_d_1darray_v4f16_f32, "V4eiffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_d_2d_f32_f32, "fiffffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_d_2d_v4f32_f32, "V4fiffffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_d_2d_v4f16_f32, "V4eiffffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_d_2darray_f32_f32, "fifffffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_d_2darray_v4f32_f32, "V4fifffffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_d_2darray_v4f16_f32, "V4eifffffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_d_3d_v4f32_f32, "V4fifffffffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_sample_d_3d_v4f16_f32, "V4eifffffffffQtV4ibii", "nc", "extended-image-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32, "V4fiffQtV4ibii", "nc", "extended-image-insts")
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/clang/include/clang/Basic/OpenMPKinds.def b/clang/include/clang/Basic/OpenMPKinds.def
index 202d06f..328a0747 100644
--- a/clang/include/clang/Basic/OpenMPKinds.def
+++ b/clang/include/clang/Basic/OpenMPKinds.def
@@ -98,6 +98,9 @@
#ifndef OPENMP_ALLOCATE_MODIFIER
#define OPENMP_ALLOCATE_MODIFIER(Name)
#endif
+#ifndef OPENMP_THREADSET_KIND
+#define OPENMP_THREADSET_KIND(Name)
+#endif
// Static attributes for 'schedule' clause.
OPENMP_SCHEDULE_KIND(static)
@@ -255,6 +258,9 @@ OPENMP_DOACROSS_MODIFIER(sink)
OPENMP_DOACROSS_MODIFIER(sink_omp_cur_iteration)
OPENMP_DOACROSS_MODIFIER(source_omp_cur_iteration)
+OPENMP_THREADSET_KIND(omp_pool)
+OPENMP_THREADSET_KIND(omp_team)
+
#undef OPENMP_NUMTASKS_MODIFIER
#undef OPENMP_NUMTHREADS_MODIFIER
#undef OPENMP_GRAINSIZE_MODIFIER
@@ -284,4 +290,4 @@ OPENMP_DOACROSS_MODIFIER(source_omp_cur_iteration)
#undef OPENMP_DEFAULTMAP_MODIFIER
#undef OPENMP_DOACROSS_MODIFIER
#undef OPENMP_ALLOCATE_MODIFIER
-
+#undef OPENMP_THREADSET_KIND
diff --git a/clang/include/clang/Basic/OpenMPKinds.h b/clang/include/clang/Basic/OpenMPKinds.h
index ed89a31..c9ddbcd 100644
--- a/clang/include/clang/Basic/OpenMPKinds.h
+++ b/clang/include/clang/Basic/OpenMPKinds.h
@@ -250,6 +250,13 @@ enum OpenMPAllocateClauseModifier {
OMPC_ALLOCATE_unknown
};
+/// OpenMP kinds for the 'threadset' clause.
+enum OpenMPThreadsetKind {
+#define OPENMP_THREADSET_KIND(Name) OMPC_THREADSET_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_THREADSET_unknown
+};
+
/// Number of allowed allocate-modifiers.
static constexpr unsigned NumberOfOMPAllocateClauseModifiers =
OMPC_ALLOCATE_unknown;
diff --git a/clang/include/clang/Sema/SemaOpenMP.h b/clang/include/clang/Sema/SemaOpenMP.h
index f9baeed..ba12b40 100644
--- a/clang/include/clang/Sema/SemaOpenMP.h
+++ b/clang/include/clang/Sema/SemaOpenMP.h
@@ -975,6 +975,12 @@ public:
OpenMPDefaultClauseVariableCategory VCKind,
SourceLocation VCKindLoc, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
+ /// Called on well-formed 'threadset' clause.
+ OMPClause *ActOnOpenMPThreadsetClause(OpenMPThreadsetKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 687cd46..2669f62 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -12403,6 +12403,11 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
// Read the base type.
switch (*Str++) {
default: llvm_unreachable("Unknown builtin type letter!");
+ case 'e':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'e'!");
+ Type = Context.getLangOpts().OpenCL ? Context.HalfTy : Context.Float16Ty;
+ break;
case 'x':
assert(HowLong == 0 && !Signed && !Unsigned &&
"Bad modifiers used with 'x'!");
diff --git a/clang/lib/AST/OpenMPClause.cpp b/clang/lib/AST/OpenMPClause.cpp
index 791df7e..59d9459 100644
--- a/clang/lib/AST/OpenMPClause.cpp
+++ b/clang/lib/AST/OpenMPClause.cpp
@@ -124,6 +124,7 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_nowait:
case OMPC_untied:
case OMPC_mergeable:
+ case OMPC_threadset:
case OMPC_threadprivate:
case OMPC_groupprivate:
case OMPC_flush:
@@ -2035,6 +2036,13 @@ void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) {
OS << ")";
}
+void OMPClausePrinter::VisitOMPThreadsetClause(OMPThreadsetClause *Node) {
+ OS << "threadset("
+ << getOpenMPSimpleClauseTypeName(OMPC_threadset,
+ unsigned(Node->getThreadsetKind()))
+ << ")";
+}
+
void OMPClausePrinter::VisitOMPProcBindClause(OMPProcBindClause *Node) {
OS << "proc_bind("
<< getOpenMPSimpleClauseTypeName(OMPC_proc_bind,
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 05b64cc..c909e1b 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -546,6 +546,8 @@ void OMPClauseProfiler::VisitOMPNocontextClause(const OMPNocontextClause *C) {
void OMPClauseProfiler::VisitOMPDefaultClause(const OMPDefaultClause *C) { }
+void OMPClauseProfiler::VisitOMPThreadsetClause(const OMPThreadsetClause *C) {}
+
void OMPClauseProfiler::VisitOMPProcBindClause(const OMPProcBindClause *C) { }
void OMPClauseProfiler::VisitOMPUnifiedAddressClause(
diff --git a/clang/lib/Basic/OpenMPKinds.cpp b/clang/lib/Basic/OpenMPKinds.cpp
index 64b2bff..3d41f2d 100644
--- a/clang/lib/Basic/OpenMPKinds.cpp
+++ b/clang/lib/Basic/OpenMPKinds.cpp
@@ -210,6 +210,15 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
#define OPENMP_ALLOCATE_MODIFIER(Name) .Case(#Name, OMPC_ALLOCATE_##Name)
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_ALLOCATE_unknown);
+ case OMPC_threadset: {
+ unsigned Type = llvm::StringSwitch<unsigned>(Str)
+#define OPENMP_THREADSET_KIND(Name) .Case(#Name, OMPC_THREADSET_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_THREADSET_unknown);
+ if (LangOpts.OpenMP < 60)
+ return OMPC_THREADSET_unknown;
+ return Type;
+ }
case OMPC_num_threads: {
unsigned Type = llvm::StringSwitch<unsigned>(Str)
#define OPENMP_NUMTHREADS_MODIFIER(Name) .Case(#Name, OMPC_NUMTHREADS_##Name)
@@ -565,6 +574,16 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
#include "clang/Basic/OpenMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'num_threads' clause modifier");
+ case OMPC_threadset:
+ switch (Type) {
+ case OMPC_THREADSET_unknown:
+ return "unknown";
+#define OPENMP_THREADSET_KIND(Name) \
+ case OMPC_THREADSET_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'threadset' clause modifier");
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_groupprivate:
diff --git a/clang/lib/Basic/Targets/AMDGPU.cpp b/clang/lib/Basic/Targets/AMDGPU.cpp
index d4de704..d4d696b 100644
--- a/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -356,12 +356,6 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
if (hasFastFMA())
Builder.defineMacro("FP_FAST_FMA");
- Builder.defineMacro("__AMDGCN_WAVEFRONT_SIZE__", Twine(WavefrontSize),
- "compile-time-constant access to the wavefront size will "
- "be removed in a future release");
- Builder.defineMacro("__AMDGCN_WAVEFRONT_SIZE", Twine(WavefrontSize),
- "compile-time-constant access to the wavefront size will "
- "be removed in a future release");
Builder.defineMacro("__AMDGCN_CUMODE__", Twine(CUMode));
}
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 07a2cfb..ca579c9 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -345,7 +345,7 @@ void CGDebugInfo::setLocation(SourceLocation Loc) {
if (Loc.isInvalid())
return;
- CurLoc = CGM.getContext().getSourceManager().getExpansionLoc(Loc);
+ CurLoc = CGM.getContext().getSourceManager().getFileLoc(Loc);
// If we've changed files in the middle of a lexical scope go ahead
// and create a new lexical scope with file node if it's different
@@ -572,7 +572,7 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
FileName = TheCU->getFile()->getFilename();
CSInfo = TheCU->getFile()->getChecksum();
} else {
- PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ PresumedLoc PLoc = SM.getPresumedLoc(SM.getFileLoc(Loc));
FileName = PLoc.getFilename();
if (FileName.empty()) {
@@ -599,7 +599,8 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (CSKind)
CSInfo.emplace(*CSKind, Checksum);
}
- return createFile(FileName, CSInfo, getSource(SM, SM.getFileID(Loc)));
+ return createFile(FileName, CSInfo,
+ getSource(SM, SM.getFileID(SM.getFileLoc(Loc))));
}
llvm::DIFile *CGDebugInfo::createFile(
@@ -654,7 +655,7 @@ unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
if (Loc.isInvalid())
return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
- return SM.getPresumedLoc(Loc).getLine();
+ return SM.getPresumedLoc(SM.getFileLoc(Loc)).getLine();
}
unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc, bool Force) {
@@ -666,7 +667,8 @@ unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc, bool Force) {
if (Loc.isInvalid() && CurLoc.isInvalid())
return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
- PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ PresumedLoc PLoc =
+ SM.getPresumedLoc(Loc.isValid() ? SM.getFileLoc(Loc) : CurLoc);
return PLoc.isValid() ? PLoc.getColumn() : 0;
}
@@ -1174,7 +1176,10 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
}
llvm::DIType *CGDebugInfo::CreateType(const BitIntType *Ty) {
- StringRef Name = Ty->isUnsigned() ? "unsigned _BitInt" : "_BitInt";
+ SmallString<32> Name;
+ llvm::raw_svector_ostream OS(Name);
+ OS << (Ty->isUnsigned() ? "unsigned _BitInt(" : "_BitInt(")
+ << Ty->getNumBits() << ")";
llvm::dwarf::TypeKind Encoding = Ty->isUnsigned()
? llvm::dwarf::DW_ATE_unsigned
: llvm::dwarf::DW_ATE_signed;
@@ -4999,7 +5004,7 @@ void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
// Update our current location
setLocation(Loc);
- if (CurLoc.isInvalid() || CurLoc.isMacroID() || LexicalBlockStack.empty())
+ if (CurLoc.isInvalid() || LexicalBlockStack.empty())
return;
llvm::MDNode *Scope = LexicalBlockStack.back();
@@ -6275,7 +6280,8 @@ void CGDebugInfo::EmitGlobalAlias(const llvm::GlobalValue *GV,
void CGDebugInfo::AddStringLiteralDebugInfo(llvm::GlobalVariable *GV,
const StringLiteral *S) {
SourceLocation Loc = S->getStrTokenLoc(0);
- PresumedLoc PLoc = CGM.getContext().getSourceManager().getPresumedLoc(Loc);
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(SM.getFileLoc(Loc));
if (!PLoc.isValid())
return;
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 66fea92..121de42 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -3731,6 +3731,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
DestructorsFlag = 0x8,
PriorityFlag = 0x20,
DetachableFlag = 0x40,
+ FreeAgentFlag = 0x80,
};
unsigned Flags = Data.Tied ? TiedFlag : 0;
bool NeedsCleanup = false;
@@ -3740,6 +3741,11 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
if (NeedsCleanup)
Flags = Flags | DestructorsFlag;
}
+ if (const auto *Clause = D.getSingleClause<OMPThreadsetClause>()) {
+ OpenMPThreadsetKind Kind = Clause->getThreadsetKind();
+ if (Kind == OMPC_THREADSET_omp_pool)
+ Flags = Flags | FreeAgentFlag;
+ }
if (Data.Priority.getInt())
Flags = Flags | PriorityFlag;
if (D.hasClausesOfKind<OMPDetachClause>())
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index f49a5af..9eab709 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -647,8 +647,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_ballot_w64: {
llvm::Type *ResultType = ConvertType(E->getType());
llvm::Value *Src = EmitScalarExpr(E->getArg(0));
- Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, { ResultType });
- return Builder.CreateCall(F, { Src });
+ Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, {ResultType});
+ return Builder.CreateCall(F, {Src});
}
case AMDGPU::BI__builtin_amdgcn_inverse_ballot_w32:
case AMDGPU::BI__builtin_amdgcn_inverse_ballot_w64: {
@@ -1139,6 +1139,83 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f16_f32:
return emitAMDGCNImageOverloadedReturnType(
*this, E, Intrinsic::amdgcn_image_sample_cube, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_1d_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_1d_v4f16_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_lz_1d, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_1d_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_1d_v4f16_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_l_1d, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_1d_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_1d_v4f16_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_d_1d, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_v4f16_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_f32_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_lz_2d, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_v4f16_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_f32_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_l_2d, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_v4f16_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_f32_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_d_2d, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_3d_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_3d_v4f16_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_lz_3d, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_3d_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_3d_v4f16_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_l_3d, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_3d_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_3d_v4f16_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_d_3d, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_cube_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_cube_v4f16_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_lz_cube, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_cube_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_cube_v4f16_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_l_cube, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_1darray_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_1darray_v4f16_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_lz_1darray, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_1darray_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_1darray_v4f16_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_l_1darray, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_1darray_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_1darray_v4f16_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_d_1darray, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_v4f16_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_f32_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_lz_2darray, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_v4f16_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_f32_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_l_2darray, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_v4f32_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_v4f16_f32:
+ case clang::AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_f32_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_sample_d_2darray, false);
+ case clang::AMDGPU::BI__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32:
+ return emitAMDGCNImageOverloadedReturnType(
+ *this, E, Intrinsic::amdgcn_image_gather4_lz_2d, false);
case AMDGPU::BI__builtin_amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
case AMDGPU::BI__builtin_amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
llvm::FixedVectorType *VT = FixedVectorType::get(Builder.getInt32Ty(), 8);
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 60f9b86..15fa78d 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -1193,14 +1193,22 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddv_s16, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddv_s32, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddv_s8, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddv_u16, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddv_u32, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddv_u8, vector_reduce_add, Add1ArgType),
NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_s16, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_s32, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_s64, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_s8, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_u16, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_u32, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_u64, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_u8, vector_reduce_add, Add1ArgType),
NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
@@ -1243,27 +1251,43 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxv_s16, vector_reduce_smax, Add1ArgType),
+ NEONMAP1(vmaxv_s32, vector_reduce_smax, Add1ArgType),
+ NEONMAP1(vmaxv_s8, vector_reduce_smax, Add1ArgType),
+ NEONMAP1(vmaxv_u16, vector_reduce_umax, Add1ArgType),
+ NEONMAP1(vmaxv_u32, vector_reduce_umax, Add1ArgType),
+ NEONMAP1(vmaxv_u8, vector_reduce_umax, Add1ArgType),
NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxvq_s16, vector_reduce_smax, Add1ArgType),
+ NEONMAP1(vmaxvq_s32, vector_reduce_smax, Add1ArgType),
+ NEONMAP1(vmaxvq_s8, vector_reduce_smax, Add1ArgType),
+ NEONMAP1(vmaxvq_u16, vector_reduce_umax, Add1ArgType),
+ NEONMAP1(vmaxvq_u32, vector_reduce_umax, Add1ArgType),
+ NEONMAP1(vmaxvq_u8, vector_reduce_umax, Add1ArgType),
NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
- NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminv_s16, vector_reduce_smin, Add1ArgType),
+ NEONMAP1(vminv_s32, vector_reduce_smin, Add1ArgType),
+ NEONMAP1(vminv_s8, vector_reduce_smin, Add1ArgType),
+ NEONMAP1(vminv_u16, vector_reduce_umin, Add1ArgType),
+ NEONMAP1(vminv_u32, vector_reduce_umin, Add1ArgType),
+ NEONMAP1(vminv_u8, vector_reduce_umin, Add1ArgType),
NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
- NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminvq_s16, vector_reduce_smin, Add1ArgType),
+ NEONMAP1(vminvq_s32, vector_reduce_smin, Add1ArgType),
+ NEONMAP1(vminvq_s8, vector_reduce_smin, Add1ArgType),
+ NEONMAP1(vminvq_u16, vector_reduce_umin, Add1ArgType),
+ NEONMAP1(vminvq_u32, vector_reduce_umin, Add1ArgType),
+ NEONMAP1(vminvq_u8, vector_reduce_umin, Add1ArgType),
NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
- NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vpaddd_s64, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vpaddd_u64, vector_reduce_add, Add1ArgType),
NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
@@ -7067,127 +7091,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::bitreverse;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
}
- case NEON::BI__builtin_neon_vaddv_u8:
- // FIXME: These are handled by the AArch64 scalar code.
- usgn = true;
- [[fallthrough]];
- case NEON::BI__builtin_neon_vaddv_s8: {
- Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vaddv_u16:
- usgn = true;
- [[fallthrough]];
- case NEON::BI__builtin_neon_vaddv_s16: {
- Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vaddvq_u8:
- usgn = true;
- [[fallthrough]];
- case NEON::BI__builtin_neon_vaddvq_s8: {
- Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vaddvq_u16:
- usgn = true;
- [[fallthrough]];
- case NEON::BI__builtin_neon_vaddvq_s16: {
- Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_u8: {
- Int = Intrinsic::aarch64_neon_umaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_u16: {
- Int = Intrinsic::aarch64_neon_umaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxvq_u8: {
- Int = Intrinsic::aarch64_neon_umaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vmaxvq_u16: {
- Int = Intrinsic::aarch64_neon_umaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_s8: {
- Int = Intrinsic::aarch64_neon_smaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_s16: {
- Int = Intrinsic::aarch64_neon_smaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxvq_s8: {
- Int = Intrinsic::aarch64_neon_smaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vmaxvq_s16: {
- Int = Intrinsic::aarch64_neon_smaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
case NEON::BI__builtin_neon_vmaxv_f16: {
Int = Intrinsic::aarch64_neon_fmaxv;
Ty = HalfTy;
@@ -7206,78 +7109,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
- case NEON::BI__builtin_neon_vminv_u8: {
- Int = Intrinsic::aarch64_neon_uminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vminv_u16: {
- Int = Intrinsic::aarch64_neon_uminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vminvq_u8: {
- Int = Intrinsic::aarch64_neon_uminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vminvq_u16: {
- Int = Intrinsic::aarch64_neon_uminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vminv_s8: {
- Int = Intrinsic::aarch64_neon_sminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vminv_s16: {
- Int = Intrinsic::aarch64_neon_sminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vminvq_s8: {
- Int = Intrinsic::aarch64_neon_sminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vminvq_s16: {
- Int = Intrinsic::aarch64_neon_sminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
case NEON::BI__builtin_neon_vminv_f16: {
Int = Intrinsic::aarch64_neon_fminv;
Ty = HalfTy;
diff --git a/clang/lib/Driver/ToolChains/ZOS.cpp b/clang/lib/Driver/ToolChains/ZOS.cpp
index 57bcb3c..9a3c453 100644
--- a/clang/lib/Driver/ToolChains/ZOS.cpp
+++ b/clang/lib/Driver/ToolChains/ZOS.cpp
@@ -75,7 +75,7 @@ void zos::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
static std::string getLEHLQ(const ArgList &Args) {
@@ -213,7 +213,7 @@ void zos::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs));
+ Exec, CmdArgs, Inputs, Output));
}
ToolChain::RuntimeLibType ZOS::GetDefaultRuntimeLibType() const {
diff --git a/clang/lib/Frontend/TextDiagnostic.cpp b/clang/lib/Frontend/TextDiagnostic.cpp
index f5add2a..aea3e72 100644
--- a/clang/lib/Frontend/TextDiagnostic.cpp
+++ b/clang/lib/Frontend/TextDiagnostic.cpp
@@ -22,22 +22,16 @@
using namespace clang;
-static const enum raw_ostream::Colors noteColor = raw_ostream::CYAN;
-static const enum raw_ostream::Colors remarkColor =
- raw_ostream::BLUE;
-static const enum raw_ostream::Colors fixitColor =
- raw_ostream::GREEN;
-static const enum raw_ostream::Colors caretColor =
- raw_ostream::GREEN;
-static const enum raw_ostream::Colors warningColor =
- raw_ostream::MAGENTA;
-static const enum raw_ostream::Colors templateColor =
- raw_ostream::CYAN;
-static const enum raw_ostream::Colors errorColor = raw_ostream::RED;
-static const enum raw_ostream::Colors fatalColor = raw_ostream::RED;
+static constexpr raw_ostream::Colors NoteColor = raw_ostream::CYAN;
+static constexpr raw_ostream::Colors RemarkColor = raw_ostream::BLUE;
+static constexpr raw_ostream::Colors FixitColor = raw_ostream::GREEN;
+static constexpr raw_ostream::Colors CaretColor = raw_ostream::GREEN;
+static constexpr raw_ostream::Colors WarningColor = raw_ostream::MAGENTA;
+static constexpr raw_ostream::Colors TemplateColor = raw_ostream::CYAN;
+static constexpr raw_ostream::Colors ErrorColor = raw_ostream::RED;
+static constexpr raw_ostream::Colors FatalColor = raw_ostream::RED;
// Used for changing only the bold attribute.
-static const enum raw_ostream::Colors savedColor =
- raw_ostream::SAVEDCOLOR;
+static constexpr raw_ostream::Colors SavedColor = raw_ostream::SAVEDCOLOR;
// Magenta is taken for 'warning'. Red is already 'error' and 'cyan'
// is already taken for 'note'. Green is already used to underline
@@ -47,6 +41,43 @@ static constexpr raw_ostream::Colors CommentColor = raw_ostream::YELLOW;
static constexpr raw_ostream::Colors LiteralColor = raw_ostream::GREEN;
static constexpr raw_ostream::Colors KeywordColor = raw_ostream::BLUE;
+namespace {
+template <typename Sub> class ColumnsOrBytes {
+public:
+ int V = 0;
+ ColumnsOrBytes(int V) : V(V) {}
+ bool isValid() const { return V != -1; }
+ Sub next() const { return Sub(V + 1); }
+ Sub prev() const { return Sub(V - 1); }
+
+ bool operator>(Sub O) const { return V > O.V; }
+ bool operator<(Sub O) const { return V < O.V; }
+ bool operator<=(Sub B) const { return V <= B.V; }
+ bool operator!=(Sub C) const { return C.V != V; }
+
+ Sub operator+(Sub B) const { return Sub(V + B.V); }
+ Sub &operator+=(Sub B) {
+ V += B.V;
+ return *static_cast<Sub *>(this);
+ }
+ Sub operator-(Sub B) const { return Sub(V - B.V); }
+ Sub &operator-=(Sub B) {
+ V -= B.V;
+ return *static_cast<Sub *>(this);
+ }
+};
+
+class Bytes final : public ColumnsOrBytes<Bytes> {
+public:
+ Bytes(int V) : ColumnsOrBytes(V) {}
+};
+
+class Columns final : public ColumnsOrBytes<Columns> {
+public:
+ Columns(int V) : ColumnsOrBytes(V) {}
+};
+} // namespace
+
/// Add highlights to differences in template strings.
static void applyTemplateHighlighting(raw_ostream &OS, StringRef Str,
bool &Normal, bool Bold) {
@@ -58,11 +89,11 @@ static void applyTemplateHighlighting(raw_ostream &OS, StringRef Str,
Str = Str.substr(Pos + 1);
if (Normal)
- OS.changeColor(templateColor, true);
+ OS.changeColor(TemplateColor, true);
else {
OS.resetColor();
if (Bold)
- OS.changeColor(savedColor, true);
+ OS.changeColor(SavedColor, true);
}
Normal = !Normal;
}
@@ -109,8 +140,8 @@ printableTextForNextCharacter(StringRef SourceLine, size_t *I,
if (SourceLine[*I] == '\t') {
assert(0 < TabStop && TabStop <= DiagnosticOptions::MaxTabStop &&
"Invalid -ftabstop value");
- unsigned Col = bytesSincePreviousTabOrLineBegin(SourceLine, *I);
- unsigned NumSpaces = TabStop - (Col % TabStop);
+ unsigned LineBytes = bytesSincePreviousTabOrLineBegin(SourceLine, *I);
+ unsigned NumSpaces = TabStop - (LineBytes % TabStop);
assert(0 < NumSpaces && NumSpaces <= TabStop
&& "Invalid computation of space amt");
++(*I);
@@ -220,97 +251,99 @@ static void expandTabs(std::string &SourceLine, unsigned TabStop) {
/// (\\u3042 is represented in UTF-8 by three bytes and takes two columns to
/// display)
static void genColumnByteMapping(StringRef SourceLine, unsigned TabStop,
- SmallVectorImpl<int> &BytesOut,
- SmallVectorImpl<int> &ColumnsOut) {
+ SmallVectorImpl<Bytes> &BytesOut,
+ SmallVectorImpl<Columns> &ColumnsOut) {
assert(BytesOut.empty());
assert(ColumnsOut.empty());
if (SourceLine.empty()) {
- BytesOut.resize(1u, 0);
- ColumnsOut.resize(1u, 0);
+ BytesOut.resize(1u, Bytes(0));
+ ColumnsOut.resize(1u, Columns(0));
return;
}
ColumnsOut.resize(SourceLine.size() + 1, -1);
- int Columns = 0;
+ Columns NumColumns = 0;
size_t I = 0;
while (I < SourceLine.size()) {
- ColumnsOut[I] = Columns;
- BytesOut.resize(Columns + 1, -1);
- BytesOut.back() = I;
+ ColumnsOut[I] = NumColumns;
+ BytesOut.resize(NumColumns.V + 1, -1);
+ BytesOut.back() = Bytes(I);
auto [Str, Printable] =
printableTextForNextCharacter(SourceLine, &I, TabStop);
- Columns += llvm::sys::locale::columnWidth(Str);
+ NumColumns += Columns(llvm::sys::locale::columnWidth(Str));
}
- ColumnsOut.back() = Columns;
- BytesOut.resize(Columns + 1, -1);
- BytesOut.back() = I;
+ ColumnsOut.back() = NumColumns;
+ BytesOut.resize(NumColumns.V + 1, -1);
+ BytesOut.back() = Bytes(I);
}
namespace {
struct SourceColumnMap {
SourceColumnMap(StringRef SourceLine, unsigned TabStop)
- : m_SourceLine(SourceLine) {
+ : SourceLine(SourceLine) {
- genColumnByteMapping(SourceLine, TabStop, m_columnToByte, m_byteToColumn);
+ genColumnByteMapping(SourceLine, TabStop, ColumnToByte, ByteToColumn);
- assert(m_byteToColumn.size()==SourceLine.size()+1);
- assert(0 < m_byteToColumn.size() && 0 < m_columnToByte.size());
- assert(m_byteToColumn.size()
- == static_cast<unsigned>(m_columnToByte.back()+1));
- assert(static_cast<unsigned>(m_byteToColumn.back()+1)
- == m_columnToByte.size());
+ assert(ByteToColumn.size() == SourceLine.size() + 1);
+ assert(0 < ByteToColumn.size() && 0 < ColumnToByte.size());
+ assert(ByteToColumn.size() ==
+ static_cast<unsigned>(ColumnToByte.back().V + 1));
+ assert(static_cast<unsigned>(ByteToColumn.back().V + 1) ==
+ ColumnToByte.size());
}
- int columns() const { return m_byteToColumn.back(); }
- int bytes() const { return m_columnToByte.back(); }
+ Columns columns() const { return ByteToColumn.back(); }
+ Bytes bytes() const { return ColumnToByte.back(); }
/// Map a byte to the column which it is at the start of, or return -1
/// if it is not at the start of a column (for a UTF-8 trailing byte).
- int byteToColumn(int n) const {
- assert(0<=n && n<static_cast<int>(m_byteToColumn.size()));
- return m_byteToColumn[n];
+ Columns byteToColumn(Bytes N) const {
+ assert(0 <= N.V && N.V < static_cast<int>(ByteToColumn.size()));
+ return ByteToColumn[N.V];
}
/// Map a byte to the first column which contains it.
- int byteToContainingColumn(int N) const {
- assert(0 <= N && N < static_cast<int>(m_byteToColumn.size()));
- while (m_byteToColumn[N] == -1)
- --N;
- return m_byteToColumn[N];
+ Columns byteToContainingColumn(Bytes N) const {
+ assert(0 <= N.V && N.V < static_cast<int>(ByteToColumn.size()));
+ while (!ByteToColumn[N.V].isValid())
+ --N.V;
+ return ByteToColumn[N.V];
}
/// Map a column to the byte which starts the column, or return -1 if
/// the column the second or subsequent column of an expanded tab or similar
/// multi-column entity.
- int columnToByte(int n) const {
- assert(0<=n && n<static_cast<int>(m_columnToByte.size()));
- return m_columnToByte[n];
+ Bytes columnToByte(Columns N) const {
+ assert(0 <= N.V && N.V < static_cast<int>(ColumnToByte.size()));
+ return ColumnToByte[N.V];
}
/// Map from a byte index to the next byte which starts a column.
- int startOfNextColumn(int N) const {
- assert(0 <= N && N < static_cast<int>(m_byteToColumn.size() - 1));
- while (byteToColumn(++N) == -1) {}
+ Bytes startOfNextColumn(Bytes N) const {
+ assert(0 <= N.V && N.V < static_cast<int>(ByteToColumn.size() - 1));
+ N = N.next();
+ while (!byteToColumn(N).isValid())
+ N = N.next();
return N;
}
/// Map from a byte index to the previous byte which starts a column.
- int startOfPreviousColumn(int N) const {
- assert(0 < N && N < static_cast<int>(m_byteToColumn.size()));
- while (byteToColumn(--N) == -1) {}
+ Bytes startOfPreviousColumn(Bytes N) const {
+ assert(0 < N.V && N.V < static_cast<int>(ByteToColumn.size()));
+ N = N.prev();
+ while (!byteToColumn(N).isValid())
+ N = N.prev();
return N;
}
- StringRef getSourceLine() const {
- return m_SourceLine;
- }
+ StringRef getSourceLine() const { return SourceLine; }
private:
- const std::string m_SourceLine;
- SmallVector<int,200> m_byteToColumn;
- SmallVector<int,200> m_columnToByte;
+ StringRef SourceLine;
+ SmallVector<Columns, 200> ByteToColumn;
+ SmallVector<Bytes, 200> ColumnToByte;
};
} // end anonymous namespace
@@ -319,14 +352,15 @@ private:
static void selectInterestingSourceRegion(std::string &SourceLine,
std::string &CaretLine,
std::string &FixItInsertionLine,
- unsigned Columns,
- const SourceColumnMap &map) {
- unsigned CaretColumns = CaretLine.size();
- unsigned FixItColumns = llvm::sys::locale::columnWidth(FixItInsertionLine);
- unsigned MaxColumns = std::max(static_cast<unsigned>(map.columns()),
- std::max(CaretColumns, FixItColumns));
+ Columns NonGutterColumns,
+ const SourceColumnMap &Map) {
+ Columns CaretColumns = Columns(CaretLine.size());
+ Columns FixItColumns =
+ Columns(llvm::sys::locale::columnWidth(FixItInsertionLine));
+ Columns MaxColumns =
+ std::max({Map.columns().V, CaretColumns.V, FixItColumns.V});
// if the number of columns is less than the desired number we're done
- if (MaxColumns <= Columns)
+ if (MaxColumns <= NonGutterColumns)
return;
// No special characters are allowed in CaretLine.
@@ -334,13 +368,13 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
// Find the slice that we need to display the full caret line
// correctly.
- unsigned CaretStart = 0, CaretEnd = CaretLine.size();
- for (; CaretStart != CaretEnd; ++CaretStart)
- if (!isWhitespace(CaretLine[CaretStart]))
+ Columns CaretStart = 0, CaretEnd = CaretLine.size();
+ for (; CaretStart != CaretEnd; CaretStart = CaretStart.next())
+ if (!isWhitespace(CaretLine[CaretStart.V]))
break;
- for (; CaretEnd != CaretStart; --CaretEnd)
- if (!isWhitespace(CaretLine[CaretEnd - 1]))
+ for (; CaretEnd != CaretStart; CaretEnd = CaretEnd.prev())
+ if (!isWhitespace(CaretLine[CaretEnd.V - 1]))
break;
// caret has already been inserted into CaretLine so the above whitespace
@@ -349,39 +383,38 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
// If we have a fix-it line, make sure the slice includes all of the
// fix-it information.
if (!FixItInsertionLine.empty()) {
- unsigned FixItStart = 0, FixItEnd = FixItInsertionLine.size();
- for (; FixItStart != FixItEnd; ++FixItStart)
- if (!isWhitespace(FixItInsertionLine[FixItStart]))
- break;
-
- for (; FixItEnd != FixItStart; --FixItEnd)
- if (!isWhitespace(FixItInsertionLine[FixItEnd - 1]))
- break;
-
// We can safely use the byte offset FixItStart as the column offset
// because the characters up until FixItStart are all ASCII whitespace
// characters.
- unsigned FixItStartCol = FixItStart;
- unsigned FixItEndCol
- = llvm::sys::locale::columnWidth(FixItInsertionLine.substr(0, FixItEnd));
-
- CaretStart = std::min(FixItStartCol, CaretStart);
- CaretEnd = std::max(FixItEndCol, CaretEnd);
+ Bytes FixItStart = 0;
+ Bytes FixItEnd = Bytes(FixItInsertionLine.size());
+ while (FixItStart != FixItEnd &&
+ isWhitespace(FixItInsertionLine[FixItStart.V]))
+ FixItStart = FixItStart.next();
+
+ while (FixItEnd != FixItStart &&
+ isWhitespace(FixItInsertionLine[FixItEnd.V - 1]))
+ FixItEnd = FixItEnd.prev();
+
+ Columns FixItStartCol = Columns(FixItStart.V);
+ Columns FixItEndCol = Columns(llvm::sys::locale::columnWidth(
+ FixItInsertionLine.substr(0, FixItEnd.V)));
+
+ CaretStart = std::min(FixItStartCol.V, CaretStart.V);
+ CaretEnd = std::max(FixItEndCol.V, CaretEnd.V);
}
// CaretEnd may have been set at the middle of a character
// If it's not at a character's first column then advance it past the current
// character.
- while (static_cast<int>(CaretEnd) < map.columns() &&
- -1 == map.columnToByte(CaretEnd))
- ++CaretEnd;
-
- assert((static_cast<int>(CaretStart) > map.columns() ||
- -1!=map.columnToByte(CaretStart)) &&
- "CaretStart must not point to a column in the middle of a source"
- " line character");
- assert((static_cast<int>(CaretEnd) > map.columns() ||
- -1!=map.columnToByte(CaretEnd)) &&
+ while (CaretEnd < Map.columns() && !Map.columnToByte(CaretEnd).isValid())
+ CaretEnd = CaretEnd.next();
+
+ assert(
+ (CaretStart > Map.columns() || Map.columnToByte(CaretStart).isValid()) &&
+ "CaretStart must not point to a column in the middle of a source"
+ " line character");
+ assert((CaretEnd > Map.columns() || Map.columnToByte(CaretEnd).isValid()) &&
"CaretEnd must not point to a column in the middle of a source line"
" character");
@@ -390,70 +423,69 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
// number of columns we have, try to grow the slice to encompass
// more context.
- unsigned SourceStart = map.columnToByte(std::min<unsigned>(CaretStart,
- map.columns()));
- unsigned SourceEnd = map.columnToByte(std::min<unsigned>(CaretEnd,
- map.columns()));
+ Bytes SourceStart = Map.columnToByte(std::min(CaretStart.V, Map.columns().V));
+ Bytes SourceEnd = Map.columnToByte(std::min(CaretEnd.V, Map.columns().V));
- unsigned CaretColumnsOutsideSource = CaretEnd-CaretStart
- - (map.byteToColumn(SourceEnd)-map.byteToColumn(SourceStart));
+ Columns CaretColumnsOutsideSource =
+ CaretEnd - CaretStart -
+ (Map.byteToColumn(SourceEnd) - Map.byteToColumn(SourceStart));
- char const *front_ellipse = " ...";
- char const *front_space = " ";
- char const *back_ellipse = "...";
- unsigned ellipses_space = strlen(front_ellipse) + strlen(back_ellipse);
+ constexpr StringRef FrontEllipse = " ...";
+ constexpr StringRef FrontSpace = " ";
+ constexpr StringRef BackEllipse = "...";
+ Columns EllipsesColumns = Columns(FrontEllipse.size() + BackEllipse.size());
- unsigned TargetColumns = Columns;
+ Columns TargetColumns = NonGutterColumns;
// Give us extra room for the ellipses
// and any of the caret line that extends past the source
- if (TargetColumns > ellipses_space+CaretColumnsOutsideSource)
- TargetColumns -= ellipses_space+CaretColumnsOutsideSource;
+ if (TargetColumns > EllipsesColumns + CaretColumnsOutsideSource)
+ TargetColumns -= EllipsesColumns + CaretColumnsOutsideSource;
- while (SourceStart>0 || SourceEnd<SourceLine.size()) {
+ while (SourceStart > 0 || SourceEnd < SourceLine.size()) {
bool ExpandedRegion = false;
- if (SourceStart>0) {
- unsigned NewStart = map.startOfPreviousColumn(SourceStart);
+ if (SourceStart > 0) {
+ Bytes NewStart = Map.startOfPreviousColumn(SourceStart);
// Skip over any whitespace we see here; we're looking for
// another bit of interesting text.
// FIXME: Detect non-ASCII whitespace characters too.
- while (NewStart && isWhitespace(SourceLine[NewStart]))
- NewStart = map.startOfPreviousColumn(NewStart);
+ while (NewStart > 0 && isWhitespace(SourceLine[NewStart.V]))
+ NewStart = Map.startOfPreviousColumn(NewStart);
// Skip over this bit of "interesting" text.
- while (NewStart) {
- unsigned Prev = map.startOfPreviousColumn(NewStart);
- if (isWhitespace(SourceLine[Prev]))
+ while (NewStart > 0) {
+ Bytes Prev = Map.startOfPreviousColumn(NewStart);
+ if (isWhitespace(SourceLine[Prev.V]))
break;
NewStart = Prev;
}
- assert(map.byteToColumn(NewStart) != -1);
- unsigned NewColumns = map.byteToColumn(SourceEnd) -
- map.byteToColumn(NewStart);
+ assert(Map.byteToColumn(NewStart).isValid());
+ Columns NewColumns =
+ Map.byteToColumn(SourceEnd) - Map.byteToColumn(NewStart);
if (NewColumns <= TargetColumns) {
SourceStart = NewStart;
ExpandedRegion = true;
}
}
- if (SourceEnd<SourceLine.size()) {
- unsigned NewEnd = map.startOfNextColumn(SourceEnd);
+ if (SourceEnd < SourceLine.size()) {
+ Bytes NewEnd = Map.startOfNextColumn(SourceEnd);
// Skip over any whitespace we see here; we're looking for
// another bit of interesting text.
// FIXME: Detect non-ASCII whitespace characters too.
- while (NewEnd < SourceLine.size() && isWhitespace(SourceLine[NewEnd]))
- NewEnd = map.startOfNextColumn(NewEnd);
+ while (NewEnd < SourceLine.size() && isWhitespace(SourceLine[NewEnd.V]))
+ NewEnd = Map.startOfNextColumn(NewEnd);
// Skip over this bit of "interesting" text.
- while (NewEnd < SourceLine.size() && isWhitespace(SourceLine[NewEnd]))
- NewEnd = map.startOfNextColumn(NewEnd);
+ while (NewEnd < SourceLine.size() && isWhitespace(SourceLine[NewEnd.V]))
+ NewEnd = Map.startOfNextColumn(NewEnd);
- assert(map.byteToColumn(NewEnd) != -1);
- unsigned NewColumns = map.byteToColumn(NewEnd) -
- map.byteToColumn(SourceStart);
+ assert(Map.byteToColumn(NewEnd).isValid());
+ Columns NewColumns =
+ Map.byteToColumn(NewEnd) - Map.byteToColumn(SourceStart);
if (NewColumns <= TargetColumns) {
SourceEnd = NewEnd;
ExpandedRegion = true;
@@ -464,39 +496,41 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
break;
}
- CaretStart = map.byteToColumn(SourceStart);
- CaretEnd = map.byteToColumn(SourceEnd) + CaretColumnsOutsideSource;
+ CaretStart = Map.byteToColumn(SourceStart);
+ CaretEnd = Map.byteToColumn(SourceEnd) + CaretColumnsOutsideSource;
// [CaretStart, CaretEnd) is the slice we want. Update the various
// output lines to show only this slice.
- assert(CaretStart!=(unsigned)-1 && CaretEnd!=(unsigned)-1 &&
- SourceStart!=(unsigned)-1 && SourceEnd!=(unsigned)-1);
+ assert(CaretStart.isValid() && CaretEnd.isValid() && SourceStart.isValid() &&
+ SourceEnd.isValid());
assert(SourceStart <= SourceEnd);
assert(CaretStart <= CaretEnd);
- unsigned BackColumnsRemoved
- = map.byteToColumn(SourceLine.size())-map.byteToColumn(SourceEnd);
- unsigned FrontColumnsRemoved = CaretStart;
- unsigned ColumnsKept = CaretEnd-CaretStart;
+ Columns BackColumnsRemoved =
+ Map.byteToColumn(Bytes{static_cast<int>(SourceLine.size())}) -
+ Map.byteToColumn(SourceEnd);
+ Columns FrontColumnsRemoved = CaretStart;
+ Columns ColumnsKept = CaretEnd - CaretStart;
// We checked up front that the line needed truncation
- assert(FrontColumnsRemoved+ColumnsKept+BackColumnsRemoved > Columns);
+ assert(FrontColumnsRemoved + ColumnsKept + BackColumnsRemoved >
+ NonGutterColumns);
// The line needs some truncation, and we'd prefer to keep the front
// if possible, so remove the back
- if (BackColumnsRemoved > strlen(back_ellipse))
- SourceLine.replace(SourceEnd, std::string::npos, back_ellipse);
+ if (BackColumnsRemoved > Columns(BackEllipse.size()))
+ SourceLine.replace(SourceEnd.V, std::string::npos, BackEllipse);
// If that's enough then we're done
- if (FrontColumnsRemoved+ColumnsKept <= Columns)
+ if (FrontColumnsRemoved + ColumnsKept <= Columns(NonGutterColumns))
return;
// Otherwise remove the front as well
- if (FrontColumnsRemoved > strlen(front_ellipse)) {
- SourceLine.replace(0, SourceStart, front_ellipse);
- CaretLine.replace(0, CaretStart, front_space);
+ if (FrontColumnsRemoved > Columns(FrontEllipse.size())) {
+ SourceLine.replace(0, SourceStart.V, FrontEllipse);
+ CaretLine.replace(0, CaretStart.V, FrontSpace);
if (!FixItInsertionLine.empty())
- FixItInsertionLine.replace(0, CaretStart, front_space);
+ FixItInsertionLine.replace(0, CaretStart.V, FrontSpace);
}
}
@@ -690,11 +724,21 @@ TextDiagnostic::printDiagnosticLevel(raw_ostream &OS,
switch (Level) {
case DiagnosticsEngine::Ignored:
llvm_unreachable("Invalid diagnostic type");
- case DiagnosticsEngine::Note: OS.changeColor(noteColor, true); break;
- case DiagnosticsEngine::Remark: OS.changeColor(remarkColor, true); break;
- case DiagnosticsEngine::Warning: OS.changeColor(warningColor, true); break;
- case DiagnosticsEngine::Error: OS.changeColor(errorColor, true); break;
- case DiagnosticsEngine::Fatal: OS.changeColor(fatalColor, true); break;
+ case DiagnosticsEngine::Note:
+ OS.changeColor(NoteColor, true);
+ break;
+ case DiagnosticsEngine::Remark:
+ OS.changeColor(RemarkColor, true);
+ break;
+ case DiagnosticsEngine::Warning:
+ OS.changeColor(WarningColor, true);
+ break;
+ case DiagnosticsEngine::Error:
+ OS.changeColor(ErrorColor, true);
+ break;
+ case DiagnosticsEngine::Fatal:
+ OS.changeColor(FatalColor, true);
+ break;
}
}
@@ -722,7 +766,7 @@ void TextDiagnostic::printDiagnosticMessage(raw_ostream &OS,
if (ShowColors && !IsSupplemental) {
// Print primary diagnostic messages in bold and without color, to visually
// indicate the transition from continuation notes and other output.
- OS.changeColor(savedColor, true);
+ OS.changeColor(SavedColor, true);
Bold = true;
}
@@ -800,7 +844,7 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
return;
if (DiagOpts.ShowColors)
- OS.changeColor(savedColor, true);
+ OS.changeColor(SavedColor, true);
emitFilename(PLoc.getFilename(), Loc.getManager());
switch (DiagOpts.getFormat()) {
@@ -961,41 +1005,40 @@ maybeAddRange(std::pair<unsigned, unsigned> A, std::pair<unsigned, unsigned> B,
struct LineRange {
unsigned LineNo;
- unsigned StartCol;
- unsigned EndCol;
+ Bytes StartByte;
+ Bytes EndByte;
};
/// Highlight \p R (with ~'s) on the current source line.
static void highlightRange(const LineRange &R, const SourceColumnMap &Map,
std::string &CaretLine) {
// Pick the first non-whitespace column.
- unsigned StartColNo = R.StartCol;
- while (StartColNo < Map.getSourceLine().size() &&
- (Map.getSourceLine()[StartColNo] == ' ' ||
- Map.getSourceLine()[StartColNo] == '\t'))
- StartColNo = Map.startOfNextColumn(StartColNo);
+ Bytes StartByte = R.StartByte;
+ while (StartByte < Map.bytes() && (Map.getSourceLine()[StartByte.V] == ' ' ||
+ Map.getSourceLine()[StartByte.V] == '\t'))
+ StartByte = Map.startOfNextColumn(StartByte);
// Pick the last non-whitespace column.
- unsigned EndColNo =
- std::min(static_cast<size_t>(R.EndCol), Map.getSourceLine().size());
- while (EndColNo && (Map.getSourceLine()[EndColNo - 1] == ' ' ||
- Map.getSourceLine()[EndColNo - 1] == '\t'))
- EndColNo = Map.startOfPreviousColumn(EndColNo);
+ Bytes EndByte = std::min(R.EndByte.V, Map.bytes().V);
+ while (EndByte.V != 0 && (Map.getSourceLine()[EndByte.V - 1] == ' ' ||
+ Map.getSourceLine()[EndByte.V - 1] == '\t'))
+ EndByte = Map.startOfPreviousColumn(EndByte);
// If the start/end passed each other, then we are trying to highlight a
// range that just exists in whitespace. That most likely means we have
// a multi-line highlighting range that covers a blank line.
- if (StartColNo > EndColNo)
+ if (StartByte > EndByte)
return;
+ assert(StartByte <= EndByte && "Invalid range!");
// Fill the range with ~'s.
- StartColNo = Map.byteToContainingColumn(StartColNo);
- EndColNo = Map.byteToContainingColumn(EndColNo);
+ Columns StartCol = Map.byteToContainingColumn(StartByte);
+ Columns EndCol = Map.byteToContainingColumn(EndByte);
+
+ if (CaretLine.size() < static_cast<size_t>(EndCol.V))
+ CaretLine.resize(EndCol.V, ' ');
- assert(StartColNo <= EndColNo && "Invalid range!");
- if (CaretLine.size() < EndColNo)
- CaretLine.resize(EndColNo, ' ');
- std::fill(CaretLine.begin() + StartColNo, CaretLine.begin() + EndColNo, '~');
+ std::fill(CaretLine.begin() + StartCol.V, CaretLine.begin() + EndCol.V, '~');
}
static std::string buildFixItInsertionLine(FileID FID, unsigned LineNo,
@@ -1006,7 +1049,7 @@ static std::string buildFixItInsertionLine(FileID FID, unsigned LineNo,
std::string FixItInsertionLine;
if (Hints.empty() || !DiagOpts.ShowFixits)
return FixItInsertionLine;
- unsigned PrevHintEndCol = 0;
+ Columns PrevHintEndCol = 0;
for (const auto &H : Hints) {
if (H.CodeToInsert.empty())
@@ -1024,12 +1067,13 @@ static std::string buildFixItInsertionLine(FileID FID, unsigned LineNo,
// Note: When modifying this function, be very careful about what is a
// "column" (printed width, platform-dependent) and what is a
// "byte offset" (SourceManager "column").
- unsigned HintByteOffset =
- SM.getColumnNumber(HintLocInfo.first, HintLocInfo.second) - 1;
+ Bytes HintByteOffset =
+ Bytes(SM.getColumnNumber(HintLocInfo.first, HintLocInfo.second))
+ .prev();
// The hint must start inside the source or right at the end
- assert(HintByteOffset < static_cast<unsigned>(map.bytes()) + 1);
- unsigned HintCol = map.byteToContainingColumn(HintByteOffset);
+ assert(HintByteOffset < map.bytes().next());
+ Columns HintCol = map.byteToContainingColumn(HintByteOffset);
// If we inserted a long previous hint, push this one forwards, and add
// an extra space to show that this is not part of the previous
@@ -1043,11 +1087,11 @@ static std::string buildFixItInsertionLine(FileID FID, unsigned LineNo,
// This should NOT use HintByteOffset, because the source might have
// Unicode characters in earlier columns.
- unsigned NewFixItLineSize = FixItInsertionLine.size() +
- (HintCol - PrevHintEndCol) +
- H.CodeToInsert.size();
+ Columns NewFixItLineSize = Columns(FixItInsertionLine.size()) +
+ (HintCol - PrevHintEndCol) +
+ Columns(H.CodeToInsert.size());
if (NewFixItLineSize > FixItInsertionLine.size())
- FixItInsertionLine.resize(NewFixItLineSize, ' ');
+ FixItInsertionLine.resize(NewFixItLineSize.V, ' ');
std::copy(H.CodeToInsert.begin(), H.CodeToInsert.end(),
FixItInsertionLine.end() - H.CodeToInsert.size());
@@ -1095,28 +1139,29 @@ prepareAndFilterRanges(const SmallVectorImpl<CharSourceRange> &Ranges,
if (EndLineNo < Lines.first || SM.getFileID(End) != FID)
continue;
- unsigned StartColumn = SM.getExpansionColumnNumber(Begin);
- unsigned EndColumn = SM.getExpansionColumnNumber(End);
- assert(StartColumn && "StartColumn must be valid, 0 is invalid");
- assert(EndColumn && "EndColumn must be valid, 0 is invalid");
+ Bytes StartByte = SM.getExpansionColumnNumber(Begin);
+ Bytes EndByte = SM.getExpansionColumnNumber(End);
+ assert(StartByte.V != 0 && "StartByte must be valid, 0 is invalid");
+ assert(EndByte.V != 0 && "EndByte must be valid, 0 is invalid");
if (R.isTokenRange())
- EndColumn += Lexer::MeasureTokenLength(End, SM, LangOpts);
+ EndByte += Bytes(Lexer::MeasureTokenLength(End, SM, LangOpts));
// Only a single line.
if (StartLineNo == EndLineNo) {
- LineRanges.push_back({StartLineNo, StartColumn - 1, EndColumn - 1});
+ LineRanges.push_back({StartLineNo, StartByte.prev(), EndByte.prev()});
continue;
}
// Start line.
- LineRanges.push_back({StartLineNo, StartColumn - 1, ~0u});
+ LineRanges.push_back(
+ {StartLineNo, StartByte.prev(), std::numeric_limits<int>::max()});
// Middle lines.
for (unsigned S = StartLineNo + 1; S != EndLineNo; ++S)
- LineRanges.push_back({S, 0, ~0u});
+ LineRanges.push_back({S, 0, std::numeric_limits<int>::max()});
// End line.
- LineRanges.push_back({EndLineNo, 0, EndColumn - 1});
+ LineRanges.push_back({EndLineNo, 0, EndByte.prev()});
}
return LineRanges;
@@ -1226,8 +1271,7 @@ highlightLines(StringRef FileData, unsigned StartLineNumber,
if (TokenStartLine > EndLineNumber)
break;
- unsigned StartCol =
- SM.getSpellingColumnNumber(T.getLocation(), &Invalid) - 1;
+ Bytes StartCol = SM.getSpellingColumnNumber(T.getLocation(), &Invalid) - 1;
if (Invalid)
continue;
@@ -1235,14 +1279,14 @@ highlightLines(StringRef FileData, unsigned StartLineNumber,
if (TokenStartLine == TokenEndLine) {
SmallVector<TextDiagnostic::StyleRange> &LineRanges =
SnippetRanges[TokenStartLine - StartLineNumber];
- appendStyle(LineRanges, T, StartCol, T.getLength());
+ appendStyle(LineRanges, T, StartCol.V, T.getLength());
continue;
}
assert((TokenEndLine - TokenStartLine) >= 1);
// For tokens that span multiple lines (think multiline comments), we
// divide them into multiple StyleRanges.
- unsigned EndCol = SM.getSpellingColumnNumber(T.getEndLoc(), &Invalid) - 1;
+ Bytes EndCol = SM.getSpellingColumnNumber(T.getEndLoc(), &Invalid) - 1;
if (Invalid)
continue;
@@ -1258,9 +1302,9 @@ highlightLines(StringRef FileData, unsigned StartLineNumber,
SnippetRanges[L - StartLineNumber];
if (L == TokenStartLine) // First line
- appendStyle(LineRanges, T, StartCol, LineLength);
+ appendStyle(LineRanges, T, StartCol.V, LineLength);
else if (L == TokenEndLine) // Last line
- appendStyle(LineRanges, T, 0, EndCol);
+ appendStyle(LineRanges, T, 0, EndCol.V);
else
appendStyle(LineRanges, T, 0, LineLength);
}
@@ -1315,11 +1359,11 @@ void TextDiagnostic::emitSnippetAndCaret(
const char *BufEnd = BufStart + BufData.size();
unsigned CaretLineNo = Loc.getLineNumber();
- unsigned CaretColNo = Loc.getColumnNumber();
+ Bytes CaretByte = Loc.getColumnNumber();
// Arbitrarily stop showing snippets when the line is too long.
static const size_t MaxLineLengthToPrint = 4096;
- if (CaretColNo > MaxLineLengthToPrint)
+ if (CaretByte > MaxLineLengthToPrint)
return;
// Find the set of lines to include.
@@ -1379,35 +1423,37 @@ void TextDiagnostic::emitSnippetAndCaret(
std::string SourceLine(LineStart, LineEnd);
// Remove trailing null bytes.
while (!SourceLine.empty() && SourceLine.back() == '\0' &&
- (LineNo != CaretLineNo || SourceLine.size() > CaretColNo))
+ (LineNo != CaretLineNo ||
+ SourceLine.size() > static_cast<size_t>(CaretByte.V)))
SourceLine.pop_back();
// Build the byte to column map.
- const SourceColumnMap sourceColMap(SourceLine, DiagOpts.TabStop);
+ const SourceColumnMap SourceColMap(SourceLine, DiagOpts.TabStop);
std::string CaretLine;
// Highlight all of the characters covered by Ranges with ~ characters.
for (const auto &LR : LineRanges) {
if (LR.LineNo == LineNo)
- highlightRange(LR, sourceColMap, CaretLine);
+ highlightRange(LR, SourceColMap, CaretLine);
}
// Next, insert the caret itself.
if (CaretLineNo == LineNo) {
- size_t Col = sourceColMap.byteToContainingColumn(CaretColNo - 1);
- CaretLine.resize(std::max(Col + 1, CaretLine.size()), ' ');
- CaretLine[Col] = '^';
+ Columns Col = SourceColMap.byteToContainingColumn(CaretByte.prev());
+ CaretLine.resize(
+ std::max(static_cast<size_t>(Col.V) + 1, CaretLine.size()), ' ');
+ CaretLine[Col.V] = '^';
}
std::string FixItInsertionLine =
- buildFixItInsertionLine(FID, LineNo, sourceColMap, Hints, SM, DiagOpts);
+ buildFixItInsertionLine(FID, LineNo, SourceColMap, Hints, SM, DiagOpts);
// If the source line is too long for our terminal, select only the
// "interesting" source region within that line.
- unsigned Columns = DiagOpts.MessageLength;
- if (Columns)
+ Columns MessageLength = DiagOpts.MessageLength;
+ if (MessageLength.V != 0)
selectInterestingSourceRegion(SourceLine, CaretLine, FixItInsertionLine,
- Columns, sourceColMap);
+ MessageLength, SourceColMap);
// If we are in -fdiagnostics-print-source-range-info mode, we are trying
// to produce easily machine parsable output. Add a space before the
@@ -1425,7 +1471,7 @@ void TextDiagnostic::emitSnippetAndCaret(
if (!CaretLine.empty()) {
indentForLineNumbers();
if (DiagOpts.ShowColors)
- OS.changeColor(caretColor, true);
+ OS.changeColor(CaretColor, true);
OS << CaretLine << '\n';
if (DiagOpts.ShowColors)
OS.resetColor();
@@ -1435,7 +1481,7 @@ void TextDiagnostic::emitSnippetAndCaret(
indentForLineNumbers();
if (DiagOpts.ShowColors)
// Print fixit line in color
- OS.changeColor(fixitColor, false);
+ OS.changeColor(FixitColor, false);
if (DiagOpts.ShowSourceRanges)
OS << ' ';
OS << FixItInsertionLine << '\n';
diff --git a/clang/lib/Lex/HeaderSearch.cpp b/clang/lib/Lex/HeaderSearch.cpp
index 65c324c..f05c28fd 100644
--- a/clang/lib/Lex/HeaderSearch.cpp
+++ b/clang/lib/Lex/HeaderSearch.cpp
@@ -221,7 +221,7 @@ std::string HeaderSearch::getPrebuiltModuleFileName(StringRef ModuleName,
// file.
for (const std::string &Dir : HSOpts.PrebuiltModulePaths) {
SmallString<256> Result(Dir);
- llvm::sys::fs::make_absolute(Result);
+ FileMgr.makeAbsolutePath(Result);
if (ModuleName.contains(':'))
// The separator of C++20 modules partitions (':') is not good for file
// systems, here clang and gcc choose '-' by default since it is not a
@@ -246,7 +246,7 @@ std::string HeaderSearch::getPrebuiltImplicitModuleFileName(Module *Module) {
StringRef ModuleCacheHash = HSOpts.DisableModuleHash ? "" : getModuleHash();
for (const std::string &Dir : HSOpts.PrebuiltModulePaths) {
SmallString<256> CachePath(Dir);
- llvm::sys::fs::make_absolute(CachePath);
+ FileMgr.makeAbsolutePath(CachePath);
llvm::sys::path::append(CachePath, ModuleCacheHash);
std::string FileName =
getCachedModuleFileNameImpl(ModuleName, ModuleMapPath, CachePath);
diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp
index 25199c7..31bc941 100644
--- a/clang/lib/Parse/ParseOpenMP.cpp
+++ b/clang/lib/Parse/ParseOpenMP.cpp
@@ -3221,6 +3221,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
else
Clause = ParseOpenMPSingleExprClause(CKind, WrongDirective);
break;
+ case OMPC_threadset:
case OMPC_fail:
case OMPC_proc_bind:
case OMPC_atomic_default_mem_order:
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index e32f437..139c4ab 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -153,7 +153,48 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f32_f32:
case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f16_f32:
case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f32_f32:
- case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f16_f32: {
+ case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1d_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1d_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1darray_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1darray_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_3d_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_3d_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_cube_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_lz_cube_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_1d_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_1d_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_1darray_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_1darray_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_3d_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_3d_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_cube_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_l_cube_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_d_1d_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_d_1d_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_d_1darray_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_d_1darray_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_d_3d_v4f32_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_sample_d_3d_v4f16_f32:
+ case AMDGPU::BI__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32: {
StringRef FeatureList(
getASTContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
if (!Builtin::evaluateRequiredTargetFeatures(FeatureList,
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 6d5cb0f..256f952 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -17216,6 +17216,10 @@ OMPClause *SemaOpenMP::ActOnOpenMPSimpleClause(
static_cast<OpenMPSeverityClauseKind>(Argument), ArgumentLoc, StartLoc,
LParenLoc, EndLoc);
break;
+ case OMPC_threadset:
+ Res = ActOnOpenMPThreadsetClause(static_cast<OpenMPThreadsetKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -17355,6 +17359,23 @@ OMPClause *SemaOpenMP::ActOnOpenMPDefaultClause(
OMPDefaultClause(M, MLoc, VCKind, VCKindLoc, StartLoc, LParenLoc, EndLoc);
}
+OMPClause *SemaOpenMP::ActOnOpenMPThreadsetClause(OpenMPThreadsetKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (Kind == OMPC_THREADSET_unknown) {
+ Diag(KindLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_threadset, /*First=*/0,
+ /*Last=*/unsigned(OMPC_THREADSET_unknown))
+ << getOpenMPClauseName(OMPC_threadset);
+ return nullptr;
+ }
+
+ return new (getASTContext())
+ OMPThreadsetClause(Kind, KindLoc, StartLoc, LParenLoc, EndLoc);
+}
+
OMPClause *SemaOpenMP::ActOnOpenMPProcBindClause(ProcBindKind Kind,
SourceLocation KindKwLoc,
SourceLocation StartLoc,
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 0c8c1d1..8c20078 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -10624,6 +10624,13 @@ TreeTransform<Derived>::TransformOMPDefaultClause(OMPDefaultClause *C) {
template <typename Derived>
OMPClause *
+TreeTransform<Derived>::TransformOMPThreadsetClause(OMPThreadsetClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
TreeTransform<Derived>::TransformOMPProcBindClause(OMPProcBindClause *C) {
return getDerived().RebuildOMPProcBindClause(
C->getProcBindKind(), C->getProcBindKindKwLoc(), C->getBeginLoc(),
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index c1b5cb7..e3106f8d 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -11255,6 +11255,9 @@ OMPClause *OMPClauseReader::readClause() {
case llvm::omp::OMPC_mergeable:
C = new (Context) OMPMergeableClause();
break;
+ case llvm::omp::OMPC_threadset:
+ C = new (Context) OMPThreadsetClause();
+ break;
case llvm::omp::OMPC_read:
C = new (Context) OMPReadClause();
break;
@@ -11658,6 +11661,17 @@ void OMPClauseReader::VisitOMPDefaultClause(OMPDefaultClause *C) {
C->setDefaultVariableCategoryLocation(Record.readSourceLocation());
}
+// Read the parameter of threadset clause. This will have been saved when
+// OMPClauseWriter is called.
+void OMPClauseReader::VisitOMPThreadsetClause(OMPThreadsetClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ SourceLocation ThreadsetKindLoc = Record.readSourceLocation();
+ C->setThreadsetKindLoc(ThreadsetKindLoc);
+ OpenMPThreadsetKind TKind =
+ static_cast<OpenMPThreadsetKind>(Record.readInt());
+ C->setThreadsetKind(TKind);
+}
+
void OMPClauseReader::VisitOMPProcBindClause(OMPProcBindClause *C) {
C->setProcBindKind(static_cast<llvm::omp::ProcBindKind>(Record.readInt()));
C->setLParenLoc(Record.readSourceLocation());
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index 377e396..3ac338e 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -7913,6 +7913,12 @@ void OMPClauseWriter::VisitOMPDefaultClause(OMPDefaultClause *C) {
Record.AddSourceLocation(C->getDefaultVCLoc());
}
+void OMPClauseWriter::VisitOMPThreadsetClause(OMPThreadsetClause *C) {
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getThreadsetKindLoc());
+ Record.writeEnum(C->getThreadsetKind());
+}
+
void OMPClauseWriter::VisitOMPProcBindClause(OMPProcBindClause *C) {
Record.push_back(unsigned(C->getProcBindKind()));
Record.AddSourceLocation(C->getLParenLoc());
diff --git a/clang/test/CIR/CodeGen/builtin_prefetech.c b/clang/test/CIR/CodeGen/builtin_prefetch.c
index cfe85b9..cfe85b9 100644
--- a/clang/test/CIR/CodeGen/builtin_prefetech.c
+++ b/clang/test/CIR/CodeGen/builtin_prefetch.c
diff --git a/clang/test/CodeGen/AArch64/neon-across.c b/clang/test/CodeGen/AArch64/neon-across.c
index aa0387d..aae5097 100644
--- a/clang/test/CodeGen/AArch64/neon-across.c
+++ b/clang/test/CodeGen/AArch64/neon-across.c
@@ -113,9 +113,8 @@ uint64_t test_vaddlvq_u32(uint32x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxv_s8
// CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMAXV_S8_I:%.*]] = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMAXV_S8_I]]
//
int8_t test_vmaxv_s8(int8x8_t a) {
return vmaxv_s8(a);
@@ -124,9 +123,8 @@ int8_t test_vmaxv_s8(int8x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxv_s16
// CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMAXV_S16_I:%.*]] = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMAXV_S16_I]]
//
int16_t test_vmaxv_s16(int16x4_t a) {
return vmaxv_s16(a);
@@ -135,9 +133,8 @@ int16_t test_vmaxv_s16(int16x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxv_u8
// CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMAXV_U8_I:%.*]] = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMAXV_U8_I]]
//
uint8_t test_vmaxv_u8(uint8x8_t a) {
return vmaxv_u8(a);
@@ -146,9 +143,8 @@ uint8_t test_vmaxv_u8(uint8x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxv_u16
// CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMAXV_U16_I:%.*]] = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMAXV_U16_I]]
//
uint16_t test_vmaxv_u16(uint16x4_t a) {
return vmaxv_u16(a);
@@ -157,9 +153,8 @@ uint16_t test_vmaxv_u16(uint16x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_s8
// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMAXVQ_S8_I:%.*]] = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMAXVQ_S8_I]]
//
int8_t test_vmaxvq_s8(int8x16_t a) {
return vmaxvq_s8(a);
@@ -168,9 +163,8 @@ int8_t test_vmaxvq_s8(int8x16_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_s16
// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMAXVQ_S16_I:%.*]] = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMAXVQ_S16_I]]
//
int16_t test_vmaxvq_s16(int16x8_t a) {
return vmaxvq_s16(a);
@@ -179,7 +173,7 @@ int16_t test_vmaxvq_s16(int16x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_s32
// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> [[A]])
+// CHECK-NEXT: [[VMAXVQ_S32_I:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMAXVQ_S32_I]]
//
int32_t test_vmaxvq_s32(int32x4_t a) {
@@ -189,9 +183,8 @@ int32_t test_vmaxvq_s32(int32x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_u8
// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMAXVQ_U8_I:%.*]] = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMAXVQ_U8_I]]
//
uint8_t test_vmaxvq_u8(uint8x16_t a) {
return vmaxvq_u8(a);
@@ -200,9 +193,8 @@ uint8_t test_vmaxvq_u8(uint8x16_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_u16
// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMAXVQ_U16_I:%.*]] = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMAXVQ_U16_I]]
//
uint16_t test_vmaxvq_u16(uint16x8_t a) {
return vmaxvq_u16(a);
@@ -211,7 +203,7 @@ uint16_t test_vmaxvq_u16(uint16x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_u32
// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> [[A]])
+// CHECK-NEXT: [[VMAXVQ_U32_I:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMAXVQ_U32_I]]
//
uint32_t test_vmaxvq_u32(uint32x4_t a) {
@@ -221,9 +213,8 @@ uint32_t test_vmaxvq_u32(uint32x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminv_s8
// CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMINV_S8_I:%.*]] = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMINV_S8_I]]
//
int8_t test_vminv_s8(int8x8_t a) {
return vminv_s8(a);
@@ -232,9 +223,8 @@ int8_t test_vminv_s8(int8x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminv_s16
// CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMINV_S16_I:%.*]] = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMINV_S16_I]]
//
int16_t test_vminv_s16(int16x4_t a) {
return vminv_s16(a);
@@ -243,9 +233,8 @@ int16_t test_vminv_s16(int16x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminv_u8
// CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMINV_U8_I:%.*]] = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMINV_U8_I]]
//
uint8_t test_vminv_u8(uint8x8_t a) {
return vminv_u8(a);
@@ -254,9 +243,8 @@ uint8_t test_vminv_u8(uint8x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminv_u16
// CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMINV_U16_I:%.*]] = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMINV_U16_I]]
//
uint16_t test_vminv_u16(uint16x4_t a) {
return vminv_u16(a);
@@ -265,9 +253,8 @@ uint16_t test_vminv_u16(uint16x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_s8
// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMINVQ_S8_I:%.*]] = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMINVQ_S8_I]]
//
int8_t test_vminvq_s8(int8x16_t a) {
return vminvq_s8(a);
@@ -276,9 +263,8 @@ int8_t test_vminvq_s8(int8x16_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_s16
// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMINVQ_S16_I:%.*]] = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMINVQ_S16_I]]
//
int16_t test_vminvq_s16(int16x8_t a) {
return vminvq_s16(a);
@@ -287,7 +273,7 @@ int16_t test_vminvq_s16(int16x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_s32
// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> [[A]])
+// CHECK-NEXT: [[VMINVQ_S32_I:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMINVQ_S32_I]]
//
int32_t test_vminvq_s32(int32x4_t a) {
@@ -297,9 +283,8 @@ int32_t test_vminvq_s32(int32x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_u8
// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMINVQ_U8_I:%.*]] = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMINVQ_U8_I]]
//
uint8_t test_vminvq_u8(uint8x16_t a) {
return vminvq_u8(a);
@@ -308,9 +293,8 @@ uint8_t test_vminvq_u8(uint8x16_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_u16
// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMINVQ_U16_I:%.*]] = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMINVQ_U16_I]]
//
uint16_t test_vminvq_u16(uint16x8_t a) {
return vminvq_u16(a);
@@ -319,7 +303,7 @@ uint16_t test_vminvq_u16(uint16x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_u32
// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> [[A]])
+// CHECK-NEXT: [[VMINVQ_U32_I:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMINVQ_U32_I]]
//
uint32_t test_vminvq_u32(uint32x4_t a) {
@@ -329,9 +313,8 @@ uint32_t test_vminvq_u32(uint32x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddv_s8
// CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VADDV_S8_I:%.*]] = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VADDV_S8_I]]
//
int8_t test_vaddv_s8(int8x8_t a) {
return vaddv_s8(a);
@@ -340,9 +323,8 @@ int8_t test_vaddv_s8(int8x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddv_s16
// CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VADDV_S16_I:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VADDV_S16_I]]
//
int16_t test_vaddv_s16(int16x4_t a) {
return vaddv_s16(a);
@@ -351,9 +333,8 @@ int16_t test_vaddv_s16(int16x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddv_u8
// CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VADDV_U8_I:%.*]] = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VADDV_U8_I]]
//
uint8_t test_vaddv_u8(uint8x8_t a) {
return vaddv_u8(a);
@@ -362,9 +343,8 @@ uint8_t test_vaddv_u8(uint8x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddv_u16
// CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VADDV_U16_I:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VADDV_U16_I]]
//
uint16_t test_vaddv_u16(uint16x4_t a) {
return vaddv_u16(a);
@@ -373,9 +353,8 @@ uint16_t test_vaddv_u16(uint16x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_s8
// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VADDVQ_S8_I:%.*]] = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VADDVQ_S8_I]]
//
int8_t test_vaddvq_s8(int8x16_t a) {
return vaddvq_s8(a);
@@ -384,9 +363,8 @@ int8_t test_vaddvq_s8(int8x16_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_s16
// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VADDVQ_S16_I:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VADDVQ_S16_I]]
//
int16_t test_vaddvq_s16(int16x8_t a) {
return vaddvq_s16(a);
@@ -395,7 +373,7 @@ int16_t test_vaddvq_s16(int16x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_s32
// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> [[A]])
+// CHECK-NEXT: [[VADDVQ_S32_I:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VADDVQ_S32_I]]
//
int32_t test_vaddvq_s32(int32x4_t a) {
@@ -405,9 +383,8 @@ int32_t test_vaddvq_s32(int32x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_u8
// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VADDVQ_U8_I:%.*]] = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VADDVQ_U8_I]]
//
uint8_t test_vaddvq_u8(uint8x16_t a) {
return vaddvq_u8(a);
@@ -416,9 +393,8 @@ uint8_t test_vaddvq_u8(uint8x16_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_u16
// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VADDVQ_U16_I:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VADDVQ_U16_I]]
//
uint16_t test_vaddvq_u16(uint16x8_t a) {
return vaddvq_u16(a);
@@ -427,7 +403,7 @@ uint16_t test_vaddvq_u16(uint16x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_u32
// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> [[A]])
+// CHECK-NEXT: [[VADDVQ_U32_I:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VADDVQ_U32_I]]
//
uint32_t test_vaddvq_u32(uint32x4_t a) {
diff --git a/clang/test/CodeGen/AArch64/neon-intrinsics.c b/clang/test/CodeGen/AArch64/neon-intrinsics.c
index 035e1ca..1c628bb 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics.c
@@ -12643,7 +12643,7 @@ uint64_t test_vqrshld_u64(uint64_t a, int64_t b) {
// CHECK-LABEL: define dso_local i64 @test_vpaddd_s64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VPADDD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> [[A]])
+// CHECK-NEXT: [[VPADDD_S64_I:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[A]])
// CHECK-NEXT: ret i64 [[VPADDD_S64_I]]
//
int64_t test_vpaddd_s64(int64x2_t a) {
@@ -23227,7 +23227,7 @@ uint64x2_t test_vpaddq_u64(uint64x2_t a, uint64x2_t b) {
// CHECK-LABEL: define dso_local i64 @test_vpaddd_u64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VPADDD_U64_I:%.*]] = call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> [[A]])
+// CHECK-NEXT: [[VPADDD_U64_I:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[A]])
// CHECK-NEXT: ret i64 [[VPADDD_U64_I]]
//
uint64_t test_vpaddd_u64(uint64x2_t a) {
@@ -23237,7 +23237,7 @@ uint64_t test_vpaddd_u64(uint64x2_t a) {
// CHECK-LABEL: define dso_local i64 @test_vaddvq_s64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VADDVQ_S64_I:%.*]] = call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> [[A]])
+// CHECK-NEXT: [[VADDVQ_S64_I:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[A]])
// CHECK-NEXT: ret i64 [[VADDVQ_S64_I]]
//
int64_t test_vaddvq_s64(int64x2_t a) {
@@ -23247,7 +23247,7 @@ int64_t test_vaddvq_s64(int64x2_t a) {
// CHECK-LABEL: define dso_local i64 @test_vaddvq_u64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VADDVQ_U64_I:%.*]] = call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> [[A]])
+// CHECK-NEXT: [[VADDVQ_U64_I:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[A]])
// CHECK-NEXT: ret i64 [[VADDVQ_U64_I]]
//
uint64_t test_vaddvq_u64(uint64x2_t a) {
@@ -23878,7 +23878,7 @@ float64x1_t test_vrsqrts_f64(float64x1_t a, float64x1_t b) {
// CHECK-LABEL: define dso_local i32 @test_vminv_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VMINV_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> [[A]])
+// CHECK-NEXT: [[VMINV_S32_I:%.*]] = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMINV_S32_I]]
//
int32_t test_vminv_s32(int32x2_t a) {
@@ -23888,7 +23888,7 @@ int32_t test_vminv_s32(int32x2_t a) {
// CHECK-LABEL: define dso_local i32 @test_vminv_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VMINV_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v2i32(<2 x i32> [[A]])
+// CHECK-NEXT: [[VMINV_U32_I:%.*]] = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMINV_U32_I]]
//
uint32_t test_vminv_u32(uint32x2_t a) {
@@ -23898,7 +23898,7 @@ uint32_t test_vminv_u32(uint32x2_t a) {
// CHECK-LABEL: define dso_local i32 @test_vmaxv_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VMAXV_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32> [[A]])
+// CHECK-NEXT: [[VMAXV_S32_I:%.*]] = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMAXV_S32_I]]
//
int32_t test_vmaxv_s32(int32x2_t a) {
@@ -23908,7 +23908,7 @@ int32_t test_vmaxv_s32(int32x2_t a) {
// CHECK-LABEL: define dso_local i32 @test_vmaxv_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VMAXV_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v2i32(<2 x i32> [[A]])
+// CHECK-NEXT: [[VMAXV_U32_I:%.*]] = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMAXV_U32_I]]
//
uint32_t test_vmaxv_u32(uint32x2_t a) {
@@ -23918,7 +23918,7 @@ uint32_t test_vmaxv_u32(uint32x2_t a) {
// CHECK-LABEL: define dso_local i32 @test_vaddv_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VADDV_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> [[A]])
+// CHECK-NEXT: [[VADDV_S32_I:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VADDV_S32_I]]
//
int32_t test_vaddv_s32(int32x2_t a) {
@@ -23928,7 +23928,7 @@ int32_t test_vaddv_s32(int32x2_t a) {
// CHECK-LABEL: define dso_local i32 @test_vaddv_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VADDV_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v2i32(<2 x i32> [[A]])
+// CHECK-NEXT: [[VADDV_U32_I:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VADDV_U32_I]]
//
uint32_t test_vaddv_u32(uint32x2_t a) {
diff --git a/clang/test/CodeGen/builtins-extended-image.c b/clang/test/CodeGen/builtins-extended-image.c
new file mode 100644
index 0000000..0dbf81d
--- /dev/null
+++ b/clang/test/CodeGen/builtins-extended-image.c
@@ -0,0 +1,1528 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -triple amdgcn-- -target-cpu gfx1100 -target-feature +extended-image-insts %s -emit-llvm -o - | FileCheck %s
+
+typedef int int4 __attribute__((ext_vector_type(4)));
+typedef float float4 __attribute__((ext_vector_type(4)));
+typedef _Float16 half4 __attribute__((ext_vector_type(4)));
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_gather4_lz_2d_v4f32_f32_r(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.gather4.lz.2d.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP4]]
+//
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_r(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(1, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_gather4_lz_2d_v4f32_f32_g(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.gather4.lz.2d.v4f32.f32.v8i32.v4i32(i32 2, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP4]]
+//
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_g(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(2, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_gather4_lz_2d_v4f32_f32_b(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.gather4.lz.2d.v4f32.f32.v8i32.v4i32(i32 4, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP4]]
+//
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_b(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(4, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_gather4_lz_2d_v4f32_f32_a(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.gather4.lz.2d.v4f32.f32.v8i32.v4i32(i32 8, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP4]]
+//
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_a(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(8, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_1d_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP1]], align 32
+// CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.1d.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP2]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP3]]
+//
+float4 test_amdgcn_image_sample_lz_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(100, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_1d_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP4]]
+//
+float4 test_amdgcn_image_sample_l_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_1d_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.1d.v4f32.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP5]]
+//
+float4 test_amdgcn_image_sample_d_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_2d_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.2d.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP4]]
+//
+float4 test_amdgcn_image_sample_lz_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_2d_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32.v8i32.v4i32(i32 10, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP5]]
+//
+float4 test_amdgcn_image_sample_l_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(10, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_2d_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP6]], align 32
+// CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.2d.v4f32.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP7]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP8]]
+//
+float4 test_amdgcn_image_sample_d_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(100, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_3d_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.3d.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP5]]
+//
+float4 test_amdgcn_image_sample_lz_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_3d_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
+// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.3d.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP6]]
+//
+float4 test_amdgcn_image_sample_l_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_3d_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32
+// CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP11:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.3d.v4f32.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], float [[TMP7]], float [[TMP8]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP10]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP11]]
+//
+// Exercises __builtin_amdgcn_image_sample_d_3d_v4f32_f32 with nine float
+// operands (presumably derivatives plus coordinates — see builtin def); the
+// CHECK lines above pin the lowering to
+// @llvm.amdgcn.image.sample.d.3d.v4f32.f32.f32.v8i32.v4i32.
+float4 test_amdgcn_image_sample_d_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_cube_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.cube.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP5]]
+//
+// Exercises __builtin_amdgcn_image_sample_lz_cube_v4f32_f32 with three float
+// coordinate operands; the CHECK lines above pin the lowering to
+// @llvm.amdgcn.image.sample.lz.cube.v4f32.f32.v8i32.v4i32.
+float4 test_amdgcn_image_sample_lz_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_cube_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
+// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.cube.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP6]]
+//
+// Exercises __builtin_amdgcn_image_sample_l_cube_v4f32_f32 with four float
+// coordinate/LOD operands; the CHECK lines above pin the lowering to
+// @llvm.amdgcn.image.sample.l.cube.v4f32.f32.v8i32.v4i32.
+float4 test_amdgcn_image_sample_l_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_1darray_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.1darray.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP4]]
+//
+// Exercises __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32 with two float
+// operands; the CHECK lines above pin the lowering to
+// @llvm.amdgcn.image.sample.lz.1darray.v4f32.f32.v8i32.v4i32.
+float4 test_amdgcn_image_sample_lz_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(1, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_1darray_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.1darray.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP5]]
+//
+// Exercises __builtin_amdgcn_image_sample_l_1darray_v4f32_f32 with three
+// float operands; the CHECK lines above pin the lowering to
+// @llvm.amdgcn.image.sample.l.1darray.v4f32.f32.v8i32.v4i32.
+float4 test_amdgcn_image_sample_l_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_1darray_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
+// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.1darray.v4f32.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP6]]
+//
+// Exercises __builtin_amdgcn_image_sample_d_1darray_v4f32_f32 with four float
+// operands; the CHECK lines above pin the lowering to
+// @llvm.amdgcn.image.sample.d.1darray.v4f32.f32.f32.v8i32.v4i32.
+float4 test_amdgcn_image_sample_d_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_2darray_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.2darray.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP5]]
+//
+// Exercises __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32 with three
+// float operands; the CHECK lines above pin the lowering to
+// @llvm.amdgcn.image.sample.lz.2darray.v4f32.f32.v8i32.v4i32.
+float4 test_amdgcn_image_sample_lz_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_2darray_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
+// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.2darray.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP6]]
+//
+// Exercises __builtin_amdgcn_image_sample_l_2darray_v4f32_f32 with four float
+// operands; the CHECK lines above pin the lowering to
+// @llvm.amdgcn.image.sample.l.2darray.v4f32.f32.v8i32.v4i32.
+float4 test_amdgcn_image_sample_l_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_2darray_v4f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP7]], align 32
+// CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.2darray.v4f32.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP8]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x float> [[TMP9]]
+//
+// Exercises __builtin_amdgcn_image_sample_d_2darray_v4f32_f32 with seven
+// float operands (presumably derivatives plus coordinates — see builtin def);
+// the CHECK lines above pin the lowering to
+// @llvm.amdgcn.image.sample.d.2darray.v4f32.f32.f32.v8i32.v4i32.
+float4 test_amdgcn_image_sample_d_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_1d_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP1]], align 32
+// CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.1d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP2]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP3]]
+//
+// Half-precision return variant: __builtin_amdgcn_image_sample_lz_1d_v4f16_f32
+// takes one float coordinate and yields half4; the CHECK lines above pin the
+// lowering to @llvm.amdgcn.image.sample.lz.1d.v4f16.f32.v8i32.v4i32.
+half4 test_amdgcn_image_sample_lz_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(100, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_1d_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.1d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP4]]
+//
+// Half-precision return variant: __builtin_amdgcn_image_sample_l_1d_v4f16_f32
+// takes two float operands and yields half4; the CHECK lines above pin the
+// lowering to @llvm.amdgcn.image.sample.l.1d.v4f16.f32.v8i32.v4i32.
+half4 test_amdgcn_image_sample_l_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_1d_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.1d.v4f16.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP5]]
+//
+// Exercises __builtin_amdgcn_image_sample_d_1d_v4f16_f32; the autogenerated
+// CHECK lines above verify lowering to @llvm.amdgcn.image.sample.d.1d (the
+// same f32 value is deliberately reused for every float operand).
+half4 test_amdgcn_image_sample_d_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_2d_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.2d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP4]]
+//
+// Exercises __builtin_amdgcn_image_sample_lz_2d_v4f16_f32; the autogenerated
+// CHECK lines above verify lowering to @llvm.amdgcn.image.sample.lz.2d.
+half4 test_amdgcn_image_sample_lz_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_2d_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.2d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP5]]
+//
+// Exercises __builtin_amdgcn_image_sample_l_2d_v4f16_f32; the autogenerated
+// CHECK lines above verify lowering to @llvm.amdgcn.image.sample.l.2d.
+half4 test_amdgcn_image_sample_l_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_2d_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP6]], align 32
+// CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP8:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.2d.v4f16.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP7]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP8]]
+//
+// Exercises __builtin_amdgcn_image_sample_d_2d_v4f16_f32; the autogenerated
+// CHECK lines above verify lowering to @llvm.amdgcn.image.sample.d.2d with
+// six float operands (all fed from the same f32 argument).
+half4 test_amdgcn_image_sample_d_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_3d_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.3d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP5]]
+//
+// Exercises __builtin_amdgcn_image_sample_lz_3d_v4f16_f32; the autogenerated
+// CHECK lines above verify lowering to @llvm.amdgcn.image.sample.lz.3d.
+half4 test_amdgcn_image_sample_lz_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_3d_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
+// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.3d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP6]]
+//
+// Exercises __builtin_amdgcn_image_sample_l_3d_v4f16_f32; the autogenerated
+// CHECK lines above verify lowering to @llvm.amdgcn.image.sample.l.3d.
+half4 test_amdgcn_image_sample_l_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_3d_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32
+// CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP11:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.3d.v4f16.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], float [[TMP7]], float [[TMP8]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP10]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP11]]
+//
+// Exercises __builtin_amdgcn_image_sample_d_3d_v4f16_f32; the autogenerated
+// CHECK lines above verify lowering to @llvm.amdgcn.image.sample.d.3d with
+// nine float operands (all fed from the same f32 argument).
+half4 test_amdgcn_image_sample_d_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_cube_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.cube.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP5]]
+//
+// Exercises __builtin_amdgcn_image_sample_lz_cube_v4f16_f32; the autogenerated
+// CHECK lines above verify lowering to @llvm.amdgcn.image.sample.lz.cube.
+half4 test_amdgcn_image_sample_lz_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_cube_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
+// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.cube.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP6]]
+//
+// Exercises __builtin_amdgcn_image_sample_l_cube_v4f16_f32; the autogenerated
+// CHECK lines above verify lowering to @llvm.amdgcn.image.sample.l.cube.
+half4 test_amdgcn_image_sample_l_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_1darray_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.1darray.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP4]]
+//
+// Exercises __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32; the
+// autogenerated CHECK lines above verify lowering to
+// @llvm.amdgcn.image.sample.lz.1darray.
+half4 test_amdgcn_image_sample_lz_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_1darray_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.1darray.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP5]]
+//
+// Exercises __builtin_amdgcn_image_sample_l_1darray_v4f16_f32; the
+// autogenerated CHECK lines above verify lowering to
+// @llvm.amdgcn.image.sample.l.1darray.
+half4 test_amdgcn_image_sample_l_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_1darray_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
+// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.1darray.v4f16.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP6]]
+//
+// Exercises __builtin_amdgcn_image_sample_d_1darray_v4f16_f32; the
+// autogenerated CHECK lines above verify lowering to
+// @llvm.amdgcn.image.sample.d.1darray.
+half4 test_amdgcn_image_sample_d_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+  return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_2darray_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.2darray.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP5]]
+//
+half4 test_amdgcn_image_sample_lz_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_2darray_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
+// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.2darray.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP6]]
+//
+half4 test_amdgcn_image_sample_l_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_2darray_v4f16_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <4 x half>, align 8, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP7]], align 32
+// CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP9:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.2darray.v4f16.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP8]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret <4 x half> [[TMP9]]
+//
+half4 test_amdgcn_image_sample_d_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_lz_2d_f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
+// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.image.sample.lz.2d.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret float [[TMP4]]
+//
+float test_amdgcn_image_sample_lz_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2d_f32_f32(1, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_l_2d_f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.amdgcn.image.sample.l.2d.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret float [[TMP5]]
+//
+float test_amdgcn_image_sample_l_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2d_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_d_2d_f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP6]], align 32
+// CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP8:%.*]] = call float @llvm.amdgcn.image.sample.d.2d.f32.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP7]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret float [[TMP8]]
+//
+float test_amdgcn_image_sample_d_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2d_f32_f32(1, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_lz_2darray_f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
+// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.amdgcn.image.sample.lz.2darray.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret float [[TMP5]]
+//
+float test_amdgcn_image_sample_lz_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_l_2darray_f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
+// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.amdgcn.image.sample.l.2darray.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret float [[TMP6]]
+//
+float test_amdgcn_image_sample_l_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2darray_f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
+
+// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_d_2darray_f32_f32(
+// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[V4F32_ADDR:%.*]] = alloca <4 x float>, align 16, addrspace(5)
+// CHECK-NEXT: [[F32_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[I32_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[TEX_ADDR:%.*]] = alloca ptr, align 32, addrspace(5)
+// CHECK-NEXT: [[VEC4I32_ADDR:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT: [[V4F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[V4F32_ADDR]] to ptr
+// CHECK-NEXT: [[F32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[F32_ADDR]] to ptr
+// CHECK-NEXT: [[I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I32_ADDR]] to ptr
+// CHECK-NEXT: [[TEX_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TEX_ADDR]] to ptr
+// CHECK-NEXT: [[VEC4I32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VEC4I32_ADDR]] to ptr
+// CHECK-NEXT: store <4 x float> [[V4F32]], ptr [[V4F32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store float [[F32]], ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[I32]], ptr [[I32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store ptr [[TEX]], ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: store <4 x i32> [[VEC4I32]], ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[F32_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
+// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP7]], align 32
+// CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
+// CHECK-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.image.sample.d.2darray.f32.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP8]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: ret float [[TMP9]]
+//
+float test_amdgcn_image_sample_d_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2darray_f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+}
diff --git a/clang/test/CodeGenCXX/ubsan-coroutines.cpp b/clang/test/CodeGenCXX/ubsan-coroutines.cpp
index 04ab050..60c89a4 100644
--- a/clang/test/CodeGenCXX/ubsan-coroutines.cpp
+++ b/clang/test/CodeGenCXX/ubsan-coroutines.cpp
@@ -1,6 +1,7 @@
// This test merely verifies that emitting the object file does not cause a
// crash when the LLVM coroutines passes are run.
// RUN: %clang_cc1 -emit-obj -std=c++2a -fsanitize=null %s -o %t.o
+// UNSUPPORTED: target={{.*}}-zos{{.*}}
namespace std {
template <typename R, typename... T> struct coroutine_traits {
diff --git a/clang/test/CodeGenHIP/maybe_undef-attr-verify.hip b/clang/test/CodeGenHIP/maybe_undef-attr-verify.hip
index 571fba1..6dc57c4 100644
--- a/clang/test/CodeGenHIP/maybe_undef-attr-verify.hip
+++ b/clang/test/CodeGenHIP/maybe_undef-attr-verify.hip
@@ -20,7 +20,7 @@
#define __maybe_undef __attribute__((maybe_undef))
#define WARP_SIZE 64
-static constexpr int warpSize = __AMDGCN_WAVEFRONT_SIZE__;
+static constexpr int warpSize = WARP_SIZE;
__device__ static inline unsigned int __lane_id() {
return __builtin_amdgcn_mbcnt_hi(
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-wave32.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-wave32.cl
index d390418..31fd0e7 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-wave32.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-wave32.cl
@@ -1,5 +1,5 @@
// REQUIRES: amdgpu-registered-target
-// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-unknown-unknown -D__AMDGCN_WAVEFRONT_SIZE=32 -target-feature +wavefrontsize32 -emit-llvm -o - %s | FileCheck -enable-var-scope %s
+// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-unknown-unknown -target-feature +wavefrontsize32 -emit-llvm -o - %s | FileCheck -enable-var-scope %s
// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-unknown-unknown -target-cpu gfx1010 -emit-llvm -o - %s | FileCheck -enable-var-scope %s
// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-unknown-unknown -target-cpu gfx1010 -target-feature +wavefrontsize32 -emit-llvm -o - %s | FileCheck -enable-var-scope %s
// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-unknown-unknown -target-cpu gfx1100 -target-feature +wavefrontsize32 -emit-llvm -o - %s | FileCheck -enable-var-scope %s
@@ -48,7 +48,3 @@ void test_read_exec_lo(global uint* out) {
void test_read_exec_hi(global uint* out) {
*out = __builtin_amdgcn_read_exec_hi();
}
-
-#if __AMDGCN_WAVEFRONT_SIZE != 32
-#error Wrong wavesize detected
-#endif
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-wave64.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-wave64.cl
index d851ec7..758b5aa 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-wave64.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-wave64.cl
@@ -50,7 +50,3 @@ void test_read_exec_lo(global ulong* out) {
void test_read_exec_hi(global ulong* out) {
*out = __builtin_amdgcn_read_exec_hi();
}
-
-#if defined(__AMDGCN_WAVEFRONT_SIZE__) && __AMDGCN_WAVEFRONT_SIZE__ != 64
-#error Wrong wavesize detected
-#endif
diff --git a/clang/test/DebugInfo/Generic/bit-int.c b/clang/test/DebugInfo/Generic/bit-int.c
index 94b9301..88ecc13 100644
--- a/clang/test/DebugInfo/Generic/bit-int.c
+++ b/clang/test/DebugInfo/Generic/bit-int.c
@@ -4,5 +4,5 @@
unsigned _BitInt(17) a;
_BitInt(2) b;
-// CHECK: !DIBasicType(name: "_BitInt", size: 8, dataSize: 2, encoding: DW_ATE_signed)
-// CHECK: !DIBasicType(name: "unsigned _BitInt", size: 32, dataSize: 17, encoding: DW_ATE_unsigned)
+// CHECK: !DIBasicType(name: "_BitInt(2)", size: 8, dataSize: 2, encoding: DW_ATE_signed)
+// CHECK: !DIBasicType(name: "unsigned _BitInt(17)", size: 32, dataSize: 17, encoding: DW_ATE_unsigned)
diff --git a/clang/test/DebugInfo/Generic/macro-info.c b/clang/test/DebugInfo/Generic/macro-info.c
new file mode 100644
index 0000000..ec49eb5
--- /dev/null
+++ b/clang/test/DebugInfo/Generic/macro-info.c
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 %s -debug-info-kind=standalone -emit-llvm -o - | FileCheck %s
+
+#define GLOBAL(num) global## num
+#define DECL_GLOBAL(x) int x
+#define SAME_ORDER(x, y) x; y
+#define SWAP_ORDER(x,y) y; x
+
+
+
+SAME_ORDER(
+ int
+// CHECK: DIGlobalVariable(name: "global",{{.*}} line: [[@LINE+1]]
+ GLOBAL // <- global
+ () = 42,
+ const char* s() {
+// CHECK: DIGlobalVariable({{.*}}line: [[@LINE+1]],{{.*}} type: [[TYPEID:![0-9]+]]
+ return "1234567890";
+ }
+)
+
+SWAP_ORDER(
+ int GLOBAL( // <- global2
+ 2) = 43,
+// CHECK: DIGlobalVariable(name: "global3",{{.*}} line: [[@LINE+3]]
+// CHECK: DIGlobalVariable(name: "global2",{{.*}} line: [[@LINE-3]]
+ DECL_GLOBAL(
+ GLOBAL( // <- global3
+ 3)) = 44
+);
+
+
+DECL_GLOBAL(
+// CHECK: DIGlobalVariable(name: "global4",{{.*}} line: [[@LINE+1]]
+ GLOBAL( // <- global4
+ 4));
diff --git a/clang/test/Driver/amdgpu-macros.cl b/clang/test/Driver/amdgpu-macros.cl
index 9fda2f3..6d049e7 100644
--- a/clang/test/Driver/amdgpu-macros.cl
+++ b/clang/test/Driver/amdgpu-macros.cl
@@ -154,26 +154,10 @@
// ARCH-GCN-DAG: #define __[[CPU]]__ 1
// ARCH-GCN-DAG: #define __[[FAMILY]]__ 1
// ARCH-GCN-DAG: #define __amdgcn_processor__ "[[CPU]]"
-// ARCH-GCN-DAG: #define __AMDGCN_WAVEFRONT_SIZE [[WAVEFRONT_SIZE]]
// ARCH-GCN-DAG: #define __GCC_DESTRUCTIVE_SIZE 128
// ARCH-GCN-DAG: #define __GCC_CONSTRUCTIVE_SIZE 128
// UNSAFEFPATOMIC-DAG: #define __AMDGCN_UNSAFE_FP_ATOMICS__ 1
-// RUN: %clang -E -dM -target amdgcn -mcpu=gfx906 -mwavefrontsize64 \
-// RUN: %s 2>&1 | FileCheck --check-prefix=WAVE64 %s
-// RUN: %clang -E -dM -target amdgcn -mcpu=gfx1010 -mwavefrontsize64 \
-// RUN: %s 2>&1 | FileCheck --check-prefix=WAVE64 %s
-// RUN: %clang -E -dM -target amdgcn -mcpu=gfx906 -mwavefrontsize64 \
-// RUN: -mno-wavefrontsize64 %s 2>&1 | FileCheck --check-prefix=WAVE64 %s
-// RUN: %clang -E -dM -target amdgcn -mcpu=gfx1010 -mwavefrontsize64 \
-// RUN: -mno-wavefrontsize64 %s 2>&1 | FileCheck --check-prefix=WAVE32 %s
-// RUN: %clang -E -dM -target amdgcn -mcpu=gfx906 -mno-wavefrontsize64 \
-// RUN: -mwavefrontsize64 %s 2>&1 | FileCheck --check-prefix=WAVE64 %s
-// RUN: %clang -E -dM -target amdgcn -mcpu=gfx1010 -mno-wavefrontsize64 \
-// RUN: -mwavefrontsize64 %s 2>&1 | FileCheck --check-prefix=WAVE64 %s
-// WAVE64-DAG: #define __AMDGCN_WAVEFRONT_SIZE 64
-// WAVE32-DAG: #define __AMDGCN_WAVEFRONT_SIZE 32
-
// RUN: %clang -E -dM -target amdgcn -mcpu=gfx906 \
// RUN: %s 2>&1 | FileCheck --check-prefix=CUMODE-ON %s
// RUN: %clang -E -dM -target amdgcn -mcpu=gfx906 -mcumode \
diff --git a/clang/test/Driver/fat-archive-unbundle-ext.c b/clang/test/Driver/fat-archive-unbundle-ext.c
index e797acc..d658ad05 100644
--- a/clang/test/Driver/fat-archive-unbundle-ext.c
+++ b/clang/test/Driver/fat-archive-unbundle-ext.c
@@ -1,5 +1,5 @@
// REQUIRES: x86-registered-target
-// UNSUPPORTED: target={{.*-windows.*}}, target={{.*}}-macosx{{.*}}, target={{.*-darwin.*}}, target={{.*}}-aix{{.*}}
+// UNSUPPORTED: target={{.*-windows.*}}, target={{.*}}-macosx{{.*}}, target={{.*-darwin.*}}, target={{.*}}-aix{{.*}}, target={{.*}}-zos{{.*}}
// Generate dummy fat object
// RUN: %clang -O0 --target=%itanium_abi_triple %s -c -o %t.host.o
diff --git a/clang/test/Driver/hip-macros.hip b/clang/test/Driver/hip-macros.hip
index 516e01a..4c460d5 100644
--- a/clang/test/Driver/hip-macros.hip
+++ b/clang/test/Driver/hip-macros.hip
@@ -1,27 +1,4 @@
// REQUIRES: amdgpu-registered-target
-// RUN: %clang -E -dM --offload-arch=gfx906 -mwavefrontsize64 \
-// RUN: --cuda-device-only -nogpuinc -nogpulib \
-// RUN: %s 2>&1 | FileCheck --check-prefixes=WAVE64 %s
-// RUN: %clang -E -dM --offload-arch=gfx1010 -mwavefrontsize64 \
-// RUN: --cuda-device-only -nogpuinc -nogpulib \
-// RUN: %s 2>&1 | FileCheck --check-prefixes=WAVE64 %s
-// RUN: %clang -E -dM --offload-arch=gfx906 -mwavefrontsize64 \
-// RUN: --cuda-device-only -nogpuinc -nogpulib \
-// RUN: -mno-wavefrontsize64 %s 2>&1 | FileCheck --check-prefixes=WAVE64 %s
-// RUN: %clang -E -dM --offload-arch=gfx1010 -mwavefrontsize64 \
-// RUN: --cuda-device-only -nogpuinc -nogpulib \
-// RUN: -mno-wavefrontsize64 %s 2>&1 | FileCheck --check-prefixes=WAVE32 %s
-// RUN: %clang -E -dM --offload-arch=gfx906 -mno-wavefrontsize64 \
-// RUN: --cuda-device-only -nogpuinc -nogpulib \
-// RUN: -mwavefrontsize64 %s 2>&1 | FileCheck --check-prefixes=WAVE64 %s
-// RUN: %clang -E -dM --offload-arch=gfx1010 -mno-wavefrontsize64 \
-// RUN: --cuda-device-only -nogpuinc -nogpulib \
-// RUN: -mwavefrontsize64 %s 2>&1 | FileCheck --check-prefixes=WAVE64 %s
-// WAVE64-DAG: #define __AMDGCN_WAVEFRONT_SIZE__ 64
-// WAVE32-DAG: #define __AMDGCN_WAVEFRONT_SIZE__ 32
-// WAVE64-DAG: #define __AMDGCN_WAVEFRONT_SIZE 64
-// WAVE32-DAG: #define __AMDGCN_WAVEFRONT_SIZE 32
-
// RUN: %clang -E -dM --offload-arch=gfx906 --cuda-device-only -nogpuinc -nogpulib \
// RUN: %s 2>&1 | FileCheck --check-prefix=CUMODE-ON %s
// RUN: %clang -E -dM --offload-arch=gfx906 --cuda-device-only -nogpuinc -nogpulib -mcumode \
diff --git a/clang/test/Driver/hip-wavefront-size-deprecation-diagnostics.hip b/clang/test/Driver/hip-wavefront-size-deprecation-diagnostics.hip
deleted file mode 100644
index 8a60f5a..0000000
--- a/clang/test/Driver/hip-wavefront-size-deprecation-diagnostics.hip
+++ /dev/null
@@ -1,115 +0,0 @@
-// REQUIRES: amdgpu-registered-target
-// RUN: %clang -xhip --offload-arch=gfx1030 --offload-host-only -pedantic -nogpuinc -nogpulib -nobuiltininc -fsyntax-only -Xclang -verify %s
-// RUN: %clang -xhip --offload-arch=gfx1030 --offload-device-only -pedantic -nogpuinc -nogpulib -nobuiltininc -fsyntax-only -Xclang -verify %s
-
-// Test that deprecation warnings for the wavefront size macro are emitted properly.
-
-#define WRAPPED __AMDGCN_WAVEFRONT_SIZE__
-
-#define DOUBLE_WRAPPED (WRAPPED)
-
-template <bool C, class T = void> struct my_enable_if {};
-
-template <class T> struct my_enable_if<true, T> {
- typedef T type;
-};
-
-__attribute__((host, device)) void use(int, const char*);
-
-template<int N> __attribute__((host, device)) int templatify(int x) {
- return x + N;
-}
-
-__attribute__((device)) const int GlobalConst = __AMDGCN_WAVEFRONT_SIZE__; // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
-constexpr int GlobalConstExpr = __AMDGCN_WAVEFRONT_SIZE__; // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
-
-#if defined(__HIP_DEVICE_COMPILE__) && (__AMDGCN_WAVEFRONT_SIZE__ == 64) // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
-int foo(void);
-#endif
-
-__attribute__((device)) int device_var = __AMDGCN_WAVEFRONT_SIZE__; // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
-
-__attribute__((device))
-void device_fun() {
- use(__AMDGCN_WAVEFRONT_SIZE, "device function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE' has been marked as deprecated}}
- use(__AMDGCN_WAVEFRONT_SIZE__, "device function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(WRAPPED, "device function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(DOUBLE_WRAPPED, "device function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(templatify<__AMDGCN_WAVEFRONT_SIZE__>(42), "device function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(GlobalConst, "device function");
- use(GlobalConstExpr, "device function");
-}
-
-__attribute__((global))
-void global_fun() {
- // no warnings expected
- use(__AMDGCN_WAVEFRONT_SIZE, "global function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE' has been marked as deprecated}}
- use(__AMDGCN_WAVEFRONT_SIZE__, "global function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(WRAPPED, "global function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(DOUBLE_WRAPPED, "global function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(templatify<__AMDGCN_WAVEFRONT_SIZE__>(42), "global function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
-}
-
-int host_var = __AMDGCN_WAVEFRONT_SIZE__; // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
-int host_var_alt = __AMDGCN_WAVEFRONT_SIZE; // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE' has been marked as deprecated}}
-int host_var_wrapped = WRAPPED; // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
-int host_var_double_wrapped = DOUBLE_WRAPPED; // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
-
-__attribute__((host))
-void host_fun() {
- use(__AMDGCN_WAVEFRONT_SIZE, "host function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE' has been marked as deprecated}}
- use(__AMDGCN_WAVEFRONT_SIZE__, "host function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(WRAPPED, "host function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(DOUBLE_WRAPPED, "host function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(templatify<__AMDGCN_WAVEFRONT_SIZE__>(42), "host function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(GlobalConst, "host function");
- use(GlobalConstExpr, "host function");
-}
-
-__attribute((host, device))
-void host_device_fun() {
- use(__AMDGCN_WAVEFRONT_SIZE__, "host device function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(WRAPPED, "host device function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(DOUBLE_WRAPPED, "host device function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- use(templatify<__AMDGCN_WAVEFRONT_SIZE__>(42), "host device function"); // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
-}
-
-template <unsigned int OuterWarpSize = __AMDGCN_WAVEFRONT_SIZE__> // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
-class FunSelector {
-public:
- template<unsigned int FunWarpSize = OuterWarpSize>
- __attribute__((device))
- auto fun(void)
- -> typename my_enable_if<(FunWarpSize <= __AMDGCN_WAVEFRONT_SIZE__), void>::type // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- {
- use(1, "yay!");
- }
-
- template<unsigned int FunWarpSize = OuterWarpSize>
- __attribute__((device))
- auto fun(void)
- -> typename my_enable_if<(FunWarpSize > __AMDGCN_WAVEFRONT_SIZE__), void>::type // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- {
- use(0, "nay!");
- }
-};
-
-__attribute__((device))
-void device_fun_selector_user() {
- FunSelector<> f;
- f.fun<>();
- f.fun<1>();
- f.fun<1000>();
-
- my_enable_if<(1 <= __AMDGCN_WAVEFRONT_SIZE__), int>::type x = 42; // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
-}
-
-__attribute__((device)) my_enable_if<(1 <= __AMDGCN_WAVEFRONT_SIZE__), int>::type DeviceFunTemplateRet(void) { // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- return 42;
-}
-
-__attribute__((device)) int DeviceFunTemplateArg(my_enable_if<(1 <= __AMDGCN_WAVEFRONT_SIZE__), int>::type x) { // expected-warning {{macro '__AMDGCN_WAVEFRONT_SIZE__' has been marked as deprecated}}
- return x;
-}
-
-// expected-note@* 0+ {{macro marked 'deprecated' here}}
diff --git a/clang/test/Headers/cuda_with_openmp.cu b/clang/test/Headers/cuda_with_openmp.cu
index efde4ec..8ea0de5 100644
--- a/clang/test/Headers/cuda_with_openmp.cu
+++ b/clang/test/Headers/cuda_with_openmp.cu
@@ -2,7 +2,7 @@
// Reported in https://bugs.llvm.org/show_bug.cgi?id=48014
///==========================================================================///
-// REQUIRES: nvptx-registered-target
+// REQUIRES: nvptx-registered-target, host-supports-cuda
// RUN: %clang -x cuda -fopenmp -c %s -o - --cuda-path=%S/../Driver/Inputs/CUDA/usr/local/cuda -nocudalib -isystem %S/Inputs/include -isystem %S/../../lib/Headers -fsyntax-only
diff --git a/clang/test/OpenMP/task_ast_print.cpp b/clang/test/OpenMP/task_ast_print.cpp
index 30fb7ab..b059f18 100644
--- a/clang/test/OpenMP/task_ast_print.cpp
+++ b/clang/test/OpenMP/task_ast_print.cpp
@@ -1,8 +1,10 @@
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp -ast-print %s | FileCheck %s
+// RUN: %clang_cc1 -verify -Wno-vla -fopenmp -fopenmp-version=60 -DOMP60 -ast-print %s | FileCheck %s --check-prefix=CHECK60
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -std=c++11 -include-pch %t -verify -Wno-vla %s -ast-print | FileCheck %s
// RUN: %clang_cc1 -verify -Wno-vla -fopenmp-simd -ast-print %s | FileCheck %s
+// RUN: %clang_cc1 -verify -Wno-vla -fopenmp-simd -fopenmp-version=60 -DOMP60 -ast-print %s | FileCheck %s --check-prefix=CHECK60
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -std=c++11 -include-pch %t -verify -Wno-vla %s -ast-print | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -ast-dump %s | FileCheck %s --check-prefix=DUMP
@@ -101,8 +103,8 @@ T tmain(T argc, T *argv) {
a = 2;
#pragma omp task default(none), private(argc, b) firstprivate(argv) shared(d) if (argc > 0) final(S<T>::TS > 0) priority(argc) affinity(argc, argv[b:argc], arr[:], ([argc][sizeof(T)])argv)
foo();
-#pragma omp taskgroup task_reduction(-: argc)
-#pragma omp task if (C) mergeable priority(C) in_reduction(-: argc)
+#pragma omp taskgroup task_reduction(+: argc)
+#pragma omp task if (C) mergeable priority(C) in_reduction(+: argc)
foo();
return 0;
}
@@ -119,8 +121,8 @@ T tmain(T argc, T *argv) {
// CHECK-NEXT: a = 2;
// CHECK-NEXT: #pragma omp task default(none) private(argc,b) firstprivate(argv) shared(d) if(argc > 0) final(S<T>::TS > 0) priority(argc) affinity(argc,argv[b:argc],arr[:],([argc][sizeof(T)])argv)
// CHECK-NEXT: foo()
-// CHECK-NEXT: #pragma omp taskgroup task_reduction(-: argc)
-// CHECK-NEXT: #pragma omp task if(C) mergeable priority(C) in_reduction(-: argc)
+// CHECK-NEXT: #pragma omp taskgroup task_reduction(+: argc)
+// CHECK-NEXT: #pragma omp task if(C) mergeable priority(C) in_reduction(+: argc)
// CHECK-NEXT: foo()
// CHECK: template<> int tmain<int, 5>(int argc, int *argv) {
// CHECK-NEXT: int b = argc, c, d, e, f, g;
@@ -134,8 +136,8 @@ T tmain(T argc, T *argv) {
// CHECK-NEXT: a = 2;
// CHECK-NEXT: #pragma omp task default(none) private(argc,b) firstprivate(argv) shared(d) if(argc > 0) final(S<int>::TS > 0) priority(argc) affinity(argc,argv[b:argc],arr[:],([argc][sizeof(int)])argv)
// CHECK-NEXT: foo()
-// CHECK-NEXT: #pragma omp taskgroup task_reduction(-: argc)
-// CHECK-NEXT: #pragma omp task if(5) mergeable priority(5) in_reduction(-: argc)
+// CHECK-NEXT: #pragma omp taskgroup task_reduction(+: argc)
+// CHECK-NEXT: #pragma omp task if(5) mergeable priority(5) in_reduction(+: argc)
// CHECK-NEXT: foo()
// CHECK: template<> long tmain<long, 1>(long argc, long *argv) {
// CHECK-NEXT: long b = argc, c, d, e, f, g;
@@ -149,8 +151,8 @@ T tmain(T argc, T *argv) {
// CHECK-NEXT: a = 2;
// CHECK-NEXT: #pragma omp task default(none) private(argc,b) firstprivate(argv) shared(d) if(argc > 0) final(S<long>::TS > 0) priority(argc) affinity(argc,argv[b:argc],arr[:],([argc][sizeof(long)])argv)
// CHECK-NEXT: foo()
-// CHECK-NEXT: #pragma omp taskgroup task_reduction(-: argc)
-// CHECK-NEXT: #pragma omp task if(1) mergeable priority(1) in_reduction(-: argc)
+// CHECK-NEXT: #pragma omp taskgroup task_reduction(+: argc)
+// CHECK-NEXT: #pragma omp task if(1) mergeable priority(1) in_reduction(+: argc)
// CHECK-NEXT: foo()
enum Enum {};
@@ -199,6 +201,14 @@ int main(int argc, char **argv) {
#pragma omp task depend(inout: omp_all_memory)
foo();
// CHECK-NEXT: foo();
+#ifdef OMP60
+#pragma omp task threadset(omp_pool)
+#pragma omp task threadset(omp_team)
+ foo();
+#endif
+ // CHECK60: #pragma omp task threadset(omp_pool)
+ // CHECK60: #pragma omp task threadset(omp_team)
+ // CHECK60-NEXT: foo();
return tmain<int, 5>(b, &b) + tmain<long, 1>(x, &x);
}
diff --git a/clang/test/OpenMP/task_codegen.cpp b/clang/test/OpenMP/task_codegen.cpp
index c3e6d9e6b..ba8e694 100644
--- a/clang/test/OpenMP/task_codegen.cpp
+++ b/clang/test/OpenMP/task_codegen.cpp
@@ -41,6 +41,9 @@
// RUN: -emit-llvm -o - -DOMP51 | FileCheck %s \
// RUN: --implicit-check-not="{{__kmpc|__tgt}}"
+// RUN: %clang_cc1 -verify -Wno-vla -triple x86_64-apple-darwin10 -fopenmp -fopenmp-version=60 -DOMP60 -fopenmp-enable-irbuilder -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK6
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -DOMP60 -fopenmp-enable-irbuilder -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -DOMP60 -fopenmp-enable-irbuilder -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify -Wno-vla %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
// expected-no-diagnostics
#ifndef HEADER
@@ -65,6 +68,7 @@ struct S {
S(const S &s) : a(s.a) {}
~S() {}
};
+
int a;
int main() {
char b;
@@ -147,6 +151,7 @@ int main() {
+
// s1 = S();
@@ -215,6 +220,19 @@ void test_omp_all_memory()
}
}
#endif // OMP51
+
+#ifdef OMP60
+void test_threadset()
+{
+#pragma omp task threadset(omp_team)
+ {
+ }
+#pragma omp task threadset(omp_pool)
+ {
+ }
+}
+#endif // OMP60
+
#endif
// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
@@ -10243,3 +10261,18 @@ void test_omp_all_memory()
// CHECK4-51-NEXT: call void @__cxx_global_var_init()
// CHECK4-51-NEXT: ret void
//
+// CHECK6-LABEL: define void @_Z14test_threadsetv()
+// CHECK6-NEXT: entry:
+// CHECK6-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_23:%.*]], align 1
+// CHECK6-NEXT: [[AGG_CAPTURED2:%.*]] = alloca [[STRUCT_ANON_25:%.*]], align 1
+// CHECK6-NEXT: call i32 @__kmpc_global_thread_num(ptr @[[GLOB_PTR:[0-9]+]])
+// CHECK6-NEXT: [[TMP0:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @1, i32 %omp_global_thread_num, i32 1, i64 40, i64 1, ptr @.omp_task_entry..[[ENTRY1:[0-9]+]])
+// CHECK6-NEXT: getelementptr inbounds nuw %struct.kmp_task_t_with_privates{{.*}}, ptr %0, i32 0, i32 0
+// CHECK6-NEXT: call i32 @__kmpc_global_thread_num(ptr @[[GLOB_PTR:[0-9]+]])
+// CHECK6-NEXT: call i32 @__kmpc_omp_task(ptr @1, i32 %omp_global_thread_num1, ptr %0)
+// CHECK6-NEXT: call i32 @__kmpc_global_thread_num(ptr @[[GLOB_PTR2:[0-9]+]])
+// CHECK6-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @1, i32 %omp_global_thread_num3, i32 129, i64 40, i64 1, ptr @.omp_task_entry..[[ENTRY2:[0-9]+]])
+// CHECK6-NEXT: getelementptr inbounds nuw %struct.kmp_task_t_with_privates{{.*}}, ptr %3, i32 0, i32 0
+// CHECK6-NEXT: call i32 @__kmpc_global_thread_num(ptr @[[GLOB_PTR2:[0-9]+]])
+// CHECK6-NEXT: call i32 @__kmpc_omp_task(ptr @1, i32 %omp_global_thread_num4, ptr %3)
+// CHECK6-NEXT: ret void
diff --git a/clang/test/OpenMP/task_threadset_messages.cpp b/clang/test/OpenMP/task_threadset_messages.cpp
new file mode 100755
index 0000000..f553a2d
--- /dev/null
+++ b/clang/test/OpenMP/task_threadset_messages.cpp
@@ -0,0 +1,99 @@
+// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp -fopenmp-version=45 -std=c++11 -ferror-limit 200 -o - %s
+// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp -fopenmp-version=50 -std=c++11 -ferror-limit 200 -o - %s
+// RUN: %clang_cc1 -verify=expected,omp51 -fopenmp -fopenmp-version=51 -std=c++11 -ferror-limit 200 -o - %s
+// RUN: %clang_cc1 -verify=expected -DOMP60 -fopenmp -fopenmp-version=60 -std=c++11 -ferror-limit 200 -o - %s
+
+// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp-simd -fopenmp-version=45 -std=c++11 -ferror-limit 200 -o - %s
+// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp-simd -fopenmp-version=50 -std=c++11 -ferror-limit 200 -o - %s
+// RUN: %clang_cc1 -verify=expected,omp51 -fopenmp-simd -fopenmp-version=51 -std=c++11 -ferror-limit 200 -o - %s
+// RUN: %clang_cc1 -verify=expected -DOMP60 -fopenmp-simd -fopenmp-version=60 -std=c++11 -ferror-limit 200 -o - %s
+
+#ifdef OMP60
+struct ComplexStruct {
+ int data[10];
+ struct InnerStruct {
+ float value;
+ } inner;
+};
+
+// Template class with member functions using 'threadset'.
+template <typename T>
+class TemplateClass {
+public:
+ void foo() {
+ #pragma omp task threadset(omp_pool)
+ {
+ T temp;
+ }
+ }
+ void bar() {
+ #pragma omp taskloop threadset(omp_team)
+ for (int i = 0; i < 10; ++i) {}
+ }
+};
+
+// Valid uses of 'threadset' with 'omp_pool' and 'omp_team' in task directive.
+void test_task_threadset_valid() {
+ int a;
+ #pragma omp task threadset(omp_pool)
+ #pragma omp task threadset(omp_team)
+ #pragma omp task threadset(omp_pool) if(1)
+ #pragma omp task threadset(omp_team) priority(5)
+ #pragma omp task threadset(omp_pool) depend(out: a)
+ #pragma omp parallel
+ {
+ #pragma omp task threadset(omp_pool)
+ {
+ #pragma omp taskloop threadset(omp_team)
+ for (int i = 0; i < 5; ++i) {}
+ }
+ }
+
+ TemplateClass<int> obj;
+ obj.foo();
+ obj.bar();
+}
+
+// Invalid uses of 'threadset' with incorrect arguments in task directive.
+void test_task_threadset_invalid_args() {
+ #pragma omp task threadset(invalid_arg) // expected-error {{expected 'omp_pool' or 'omp_team' in OpenMP clause 'threadset'}}
+ #pragma omp task threadset(123) // expected-error {{expected 'omp_pool' or 'omp_team' in OpenMP clause 'threadset'}}
+ #pragma omp task threadset(omp_pool, omp_team) // expected-error {{expected ')'}} expected-note {{to match this '('}}
+ #pragma omp task threadset() // expected-error {{expected 'omp_pool' or 'omp_team' in OpenMP clause 'threadset'}}
+ {}
+}
+
+// Valid uses of 'threadset' with 'omp_pool' and 'omp_team' in taskloop directive.
+void test_taskloop_threadset_valid() {
+ #pragma omp taskloop threadset(omp_pool)
+ for (int i = 0; i < 10; ++i) {}
+ #pragma omp taskloop threadset(omp_team)
+ for (int i = 0; i < 10; ++i) {}
+ #pragma omp taskloop threadset(omp_pool) grainsize(5)
+ for (int i = 0; i < 10; ++i) {}
+ #pragma omp taskloop threadset(omp_team) num_tasks(2)
+ for (int i = 0; i < 10; ++i) {}
+}
+
+// Invalid uses of 'threadset' with incorrect arguments in taskloop directive.
+void test_taskloop_threadset_invalid_args() {
+ #pragma omp taskloop threadset(invalid_arg) // expected-error {{expected 'omp_pool' or 'omp_team' in OpenMP clause 'threadset'}}
+ for (int i = 0; i < 10; ++i) {}
+ #pragma omp taskloop threadset(123) // expected-error {{expected 'omp_pool' or 'omp_team' in OpenMP clause 'threadset'}}
+ for (int i = 0; i < 10; ++i) {}
+ #pragma omp taskloop threadset(omp_pool, omp_team) // expected-error {{expected ')'}} expected-note {{to match this '('}}
+ for (int i = 0; i < 10; ++i) {}
+ #pragma omp taskloop threadset() // expected-error {{expected 'omp_pool' or 'omp_team' in OpenMP clause 'threadset'}}
+ for (int i = 0; i < 10; ++i) {}
+}
+
+#else
+void test_threadset_not_supported() {
+ #pragma omp task threadset(omp_pool) // omp45-error {{unexpected OpenMP clause 'threadset' in directive '#pragma omp task'}} omp50-error {{unexpected OpenMP clause 'threadset' in directive '#pragma omp task'}} omp51-error {{unexpected OpenMP clause 'threadset' in directive '#pragma omp task'}}
+ #pragma omp task threadset(omp_team) // omp45-error {{unexpected OpenMP clause 'threadset' in directive '#pragma omp task'}} omp50-error {{unexpected OpenMP clause 'threadset' in directive '#pragma omp task'}} omp51-error {{unexpected OpenMP clause 'threadset' in directive '#pragma omp task'}}
+ #pragma omp taskloop threadset(omp_team) // omp45-error {{unexpected OpenMP clause 'threadset' in directive '#pragma omp taskloop'}} omp50-error {{unexpected OpenMP clause 'threadset' in directive '#pragma omp taskloop'}} omp51-error {{unexpected OpenMP clause 'threadset' in directive '#pragma omp taskloop'}}
+ for (int i = 0; i < 10; ++i) {}
+ #pragma omp taskloop threadset(omp_pool) // omp45-error {{unexpected OpenMP clause 'threadset' in directive '#pragma omp taskloop'}} omp50-error {{unexpected OpenMP clause 'threadset' in directive '#pragma omp taskloop'}} omp51-error {{unexpected OpenMP clause 'threadset' in directive '#pragma omp taskloop'}}
+ for (int i = 0; i < 10; ++i) {}
+}
+#endif
diff --git a/clang/test/OpenMP/taskloop_ast_print.cpp b/clang/test/OpenMP/taskloop_ast_print.cpp
index 1b6d724..e4bf20a 100644
--- a/clang/test/OpenMP/taskloop_ast_print.cpp
+++ b/clang/test/OpenMP/taskloop_ast_print.cpp
@@ -1,8 +1,10 @@
// RUN: %clang_cc1 -verify -fopenmp -ast-print %s | FileCheck %s
+// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=60 -DOMP60 -ast-print %s | FileCheck %s --check-prefix=CHECK60
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -std=c++11 -include-pch %t -verify %s -ast-print | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -ast-print %s | FileCheck %s
+// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=60 -DOMP60 -ast-print %s | FileCheck %s --check-prefix=CHECK60
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -std=c++11 -include-pch %t -verify %s -ast-print | FileCheck %s
// expected-no-diagnostics
@@ -87,6 +89,20 @@ int main(int argc, char **argv) {
// CHECK-NEXT: #pragma omp cancel taskgroup
// CHECK-NEXT: #pragma omp cancellation point taskgroup
// CHECK-NEXT: foo();
+#ifdef OMP60
+#pragma omp taskloop threadset(omp_team)
+ for (int i = 0; i < 10; ++i) {
+#pragma omp taskloop threadset(omp_pool)
+ for (int j = 0; j < 10; ++j) {
+ foo();
+ }
+}
+#endif
+ // CHECK60: #pragma omp taskloop threadset(omp_team)
+ // CHECK60-NEXT: for (int i = 0; i < 10; ++i) {
+ // CHECK60: #pragma omp taskloop threadset(omp_pool)
+ // CHECK60-NEXT: for (int j = 0; j < 10; ++j) {
+ // CHECK60-NEXT: foo();
return (tmain<int, 5>(argc) + tmain<char, 1>(argv[0][0]));
}
diff --git a/clang/test/OpenMP/taskloop_codegen.cpp b/clang/test/OpenMP/taskloop_codegen.cpp
index 69f8d3b..d119760 100644
--- a/clang/test/OpenMP/taskloop_codegen.cpp
+++ b/clang/test/OpenMP/taskloop_codegen.cpp
@@ -5,7 +5,12 @@
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -x c++ -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
+
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
+
+// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fopenmp-version=60 -DOMP60 -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK6
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -DOMP60 -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
+// RUN: %clang_cc1 -fopenmp -fopenmp-version=60 -DOMP60 -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
@@ -241,4 +246,52 @@ void taskloop_with_class() {
}
}
+#ifdef OMP60
+void test_threadset()
+{
+#pragma omp taskloop threadset(omp_team)
+ for (int i = 0; i < 10; ++i) {
+ }
+#pragma omp taskloop threadset(omp_pool)
+ for (int i = 0; i < 10; ++i) {
+ }
+}
+#endif // OMP60
+// CHECK6-LABEL: define void @_Z14test_threadsetv()
+// CHECK6-NEXT: entry:
+// CHECK6-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_14:%.*]], align 1
+// CHECK6-NEXT: %[[TMP:.*]] = alloca i32, align 4
+// CHECK6-NEXT: [[AGG_CAPTURED1:%.*]] = alloca [[STRUCT_ANON_16:%.*]], align 1
+// CHECK6-NEXT: %[[TMP2:.*]] = alloca i32, align 4
+// CHECK6-NEXT: %[[TID0:.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB_PTR:[0-9]+]])
+// CHECK6-NEXT: call void @__kmpc_taskgroup(ptr @1, i32 %[[TID0:.*]])
+// CHECK6-NEXT: %[[TID1:.*]] = call ptr @__kmpc_omp_task_alloc(ptr @1, i32 %[[TID0:.*]], i32 1, i64 80, i64 1, ptr @.omp_task_entry..[[ENTRY1:[0-9]+]])
+// CHECK6-NEXT: %[[TID2:.*]] = getelementptr inbounds nuw %struct.kmp_task_t_with_privates{{.*}}, ptr %[[TID1:.*]], i32 0, i32 0
+// CHECK6-NEXT: %[[TID3:.*]] = getelementptr inbounds nuw %struct.kmp_task_t{{.*}}, ptr %[[TID2:.*]], i32 0, i32 5
+// CHECK6-NEXT: store i64 0, ptr %[[TID3:.*]], align 8
+// CHECK6-NEXT: %[[TID4:.*]] = getelementptr inbounds nuw %struct.kmp_task_t{{.*}}, ptr %[[TID2:.*]], i32 0, i32 6
+// CHECK6-NEXT: store i64 9, ptr %[[TID4:.*]], align 8
+// CHECK6-NEXT: %[[TID5:.*]] = getelementptr inbounds nuw %struct.kmp_task_t{{.*}}, ptr %[[TID2:.*]], i32 0, i32 7
+// CHECK6-NEXT: store i64 1, ptr %[[TID5:.*]], align 8
+// CHECK6-NEXT: %[[TID6:.*]] = getelementptr inbounds nuw %struct.kmp_task_t{{.*}}, ptr %[[TID2:.*]], i32 0, i32 9
+// CHECK6-NEXT: call void @llvm.memset.p0.i64(ptr align 8 %[[TID6:.*]], i8 0, i64 8, i1 false)
+// CHECK6-NEXT: %[[TID7:.*]] = load i64, ptr %[[TID5:.*]], align 8
+// CHECK6-NEXT: call void @__kmpc_taskloop(ptr @1, i32 %[[TID0:.*]], ptr %[[TID1:.*]], i32 1, ptr %[[TID3:.*]], ptr %4, i64 %[[TID7:.*]], i32 1, i32 0, i64 0, ptr null)
+// CHECK6-NEXT: call void @__kmpc_end_taskgroup(ptr @1, i32 %[[TID0:.*]])
+// CHECK6-NEXT: call void @__kmpc_taskgroup(ptr @1, i32 %[[TID0:.*]])
+// CHECK6-NEXT: %[[TID8:.*]] = call ptr @__kmpc_omp_task_alloc(ptr @1, i32 %[[TID0:.*]], i32 129, i64 80, i64 1, ptr @.omp_task_entry..[[ENTRY1:[0-9]+]])
+// CHECK6-NEXT: %[[TID9:.*]] = getelementptr inbounds nuw %struct.kmp_task_t_with_privates{{.*}}, ptr %[[TID8:.*]], i32 0, i32 0
+// CHECK6-NEXT: %[[TID10:.*]] = getelementptr inbounds nuw %struct.kmp_task_t{{.*}}, ptr %[[TID9:.*]], i32 0, i32 5
+// CHECK6-NEXT: store i64 0, ptr %[[TID10:.*]], align 8
+// CHECK6-NEXT: %[[TID11:.*]] = getelementptr inbounds nuw %struct.kmp_task_t{{.*}}, ptr %[[TID9:.*]], i32 0, i32 6
+// CHECK6-NEXT: store i64 9, ptr %[[TID11:.*]], align 8
+// CHECK6-NEXT: %[[TID12:.*]] = getelementptr inbounds nuw %struct.kmp_task_t{{.*}}, ptr %[[TID9:.*]], i32 0, i32 7
+// CHECK6-NEXT: store i64 1, ptr %[[TID12:.*]], align 8
+// CHECK6-NEXT: %[[TID13:.*]] = getelementptr inbounds nuw %struct.kmp_task_t{{.*}}, ptr %[[TID9:.*]], i32 0, i32 9
+// CHECK6-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TID13:.*]], i8 0, i64 8, i1 false)
+// CHECK6-NEXT: %[[TID14:.*]] = load i64, ptr [[TID12:.*]], align 8
+// CHECK6-NEXT: call void @__kmpc_taskloop(ptr @1, i32 %[[TID0:.*]], ptr %[[TID8:.*]], i32 1, ptr %[[TID10:.*]], ptr %[[TID11:.*]], i64 %[[TID14:.*]], i32 1, i32 0, i64 0, ptr null)
+// CHECK6-NEXT: call void @__kmpc_end_taskgroup(ptr @1, i32 %[[TID0:.*]])
+// CHECK6-NEXT: ret void
+
#endif
diff --git a/clang/test/Preprocessor/predefined-arch-macros.c b/clang/test/Preprocessor/predefined-arch-macros.c
index a3c3697..cdb4632 100644
--- a/clang/test/Preprocessor/predefined-arch-macros.c
+++ b/clang/test/Preprocessor/predefined-arch-macros.c
@@ -4418,7 +4418,6 @@
// CHECK_AMDGCN_NONE-NOT: #define __HAS_FMAF__
// CHECK_AMDGCN_NONE-NOT: #define __HAS_FP64__
// CHECK_AMDGCN_NONE-NOT: #define __HAS_LDEXPF__
-// CHECK_AMDGCN_NONE-NOT: #define __AMDGCN_WAVEFRONT_SIZE__
// Begin r600 tests ----------------
@@ -4439,7 +4438,6 @@
// RUN: %clang -x hip -E -dM %s -o - 2>&1 --offload-host-only -nogpulib \
// RUN: -nogpuinc --offload-arch=gfx803 -target x86_64-unknown-linux \
// RUN: | FileCheck -match-full-lines %s -check-prefixes=CHECK_HIP_HOST
-// CHECK_HIP_HOST: #define __AMDGCN_WAVEFRONT_SIZE__ 64
// CHECK_HIP_HOST: #define __AMDGPU__ 1
// CHECK_HIP_HOST: #define __AMD__ 1
diff --git a/clang/test/SemaOpenCL/builtins-extended-image-param-gfx1100-err.cl b/clang/test/SemaOpenCL/builtins-extended-image-param-gfx1100-err.cl
new file mode 100644
index 0000000..47dbdd4
--- /dev/null
+++ b/clang/test/SemaOpenCL/builtins-extended-image-param-gfx1100-err.cl
@@ -0,0 +1,227 @@
+// RUN: %clang_cc1 -triple amdgcn-- -target-cpu gfx1100 -target-feature +extended-image-insts -S -verify=expected -o - %s
+// REQUIRES: amdgpu-registered-target
+
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+
+typedef int int4 __attribute__((ext_vector_type(4)));
+typedef float float4 __attribute__((ext_vector_type(4)));
+typedef half half4 __attribute__((ext_vector_type(4)));
+
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_r(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(1, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_g(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(2, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_b(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(4, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_a(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(8, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_lz_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(i32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_l_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_d_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_lz_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_l_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, f32, 103); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_d_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(i32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_v4f32_f32' must be a constant integer}}
+}
+float4 test_amdgcn_image_sample_lz_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(i32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_3d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_l_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_3d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_d_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_3d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_lz_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_cube_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_l_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_cube_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_lz_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(1, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1darray_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_l_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1darray_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_d_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1darray_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_lz_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_l_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_d_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_v4f32_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_lz_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(23, f32, tex, vec4i32, 0, i32, 11); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_l_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(i32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_d_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_lz_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_l_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_d_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_lz_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_3d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_l_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_3d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_d_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_3d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_lz_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_cube_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_l_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(i32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_cube_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_lz_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(i32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1darray_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_l_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1darray_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_d_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1darray_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_lz_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_l_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_d_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_v4f16_f32' must be a constant integer}}
+}
+
+float test_amdgcn_image_sample_lz_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2d_f32_f32(1, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_f32_f32' must be a constant integer}}
+}
+
+float test_amdgcn_image_sample_l_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2d_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_f32_f32' must be a constant integer}}
+}
+
+float test_amdgcn_image_sample_d_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2d_f32_f32(1, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_f32_f32' must be a constant integer}}
+}
+
+float test_amdgcn_image_sample_lz_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_f32_f32' must be a constant integer}}
+}
+
+float test_amdgcn_image_sample_l_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2darray_f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_f32_f32' must be a constant integer}}
+}
+
+float test_amdgcn_image_sample_d_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2darray_f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_f32_f32' must be a constant integer}}
+}
diff --git a/clang/test/SemaOpenCL/builtins-extended-image-param-gfx942-err.cl b/clang/test/SemaOpenCL/builtins-extended-image-param-gfx942-err.cl
new file mode 100644
index 0000000..e60f8c7
--- /dev/null
+++ b/clang/test/SemaOpenCL/builtins-extended-image-param-gfx942-err.cl
@@ -0,0 +1,227 @@
+// RUN: %clang_cc1 -triple amdgcn-- -target-cpu gfx942 -verify=GFX94 -S -o - %s
+// REQUIRES: amdgpu-registered-target
+
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+
+typedef int int4 __attribute__((ext_vector_type(4)));
+typedef float float4 __attribute__((ext_vector_type(4)));
+typedef half half4 __attribute__((ext_vector_type(4)));
+
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_r(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(1, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_gather4_lz_2d_v4f32_f32_r' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_g(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(2, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_gather4_lz_2d_v4f32_f32_g' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_b(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(4, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_gather4_lz_2d_v4f32_f32_b' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_a(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(8, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_gather4_lz_2d_v4f32_f32_a' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_lz_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(105, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1d_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_l_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1d_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_d_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1d_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_lz_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_l_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(10, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_d_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(105, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_v4f32_f32' needs target feature extended-image-insts}}
+}
+float4 test_amdgcn_image_sample_lz_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(105, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_3d_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_l_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_3d_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_d_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_3d_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_lz_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_cube_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_l_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_cube_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_lz_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(1, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1darray_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_l_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1darray_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_d_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1darray_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_lz_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_l_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+float4 test_amdgcn_image_sample_d_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_v4f32_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_lz_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(105, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1d_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_l_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(105, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1d_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_d_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(105, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1d_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_lz_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_l_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_d_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_lz_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_3d_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_l_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_3d_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_d_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_3d_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_lz_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_cube_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_l_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(105, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_cube_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_lz_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(105, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1darray_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_l_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(105, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1darray_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_d_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1darray_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_lz_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_l_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+half4 test_amdgcn_image_sample_d_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_v4f16_f32' needs target feature extended-image-insts}}
+}
+
+float test_amdgcn_image_sample_lz_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2d_f32_f32(1, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_f32_f32' needs target feature extended-image-insts}}
+}
+
+float test_amdgcn_image_sample_l_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2d_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_f32_f32' needs target feature extended-image-insts}}
+}
+
+float test_amdgcn_image_sample_d_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2d_f32_f32(1, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_f32_f32' needs target feature extended-image-insts}}
+}
+
+float test_amdgcn_image_sample_lz_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_f32_f32' needs target feature extended-image-insts}}
+}
+
+float test_amdgcn_image_sample_l_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2darray_f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_f32_f32' needs target feature extended-image-insts}}
+}
+
+float test_amdgcn_image_sample_d_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2darray_f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_f32_f32' needs target feature extended-image-insts}}
+}
diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp
index fc27fd2..08776d9 100644
--- a/clang/tools/libclang/CIndex.cpp
+++ b/clang/tools/libclang/CIndex.cpp
@@ -2406,6 +2406,8 @@ void OMPClauseEnqueue::VisitOMPCompareClause(const OMPCompareClause *) {}
void OMPClauseEnqueue::VisitOMPFailClause(const OMPFailClause *) {}
+void OMPClauseEnqueue::VisitOMPThreadsetClause(const OMPThreadsetClause *) {}
+
void OMPClauseEnqueue::VisitOMPAbsentClause(const OMPAbsentClause *) {}
void OMPClauseEnqueue::VisitOMPHoldsClause(const OMPHoldsClause *) {}
diff --git a/clang/www/cxx_dr_status.html b/clang/www/cxx_dr_status.html
index b7da22c..ae9b28e 100755
--- a/clang/www/cxx_dr_status.html
+++ b/clang/www/cxx_dr_status.html
@@ -3113,11 +3113,11 @@ of class templates</td>
<td>Default initialization of POD classes?</td>
<td class="na" align="center">N/A</td>
</tr>
- <tr class="open" id="511">
+ <tr id="511">
<td><a href="https://cplusplus.github.io/CWG/issues/511.html">511</a></td>
- <td>open</td>
+ <td>NAD</td>
<td>POD-structs with template assignment operators</td>
- <td align="center">Not resolved</td>
+ <td class="unknown" align="center">Unknown</td>
</tr>
<tr id="512">
<td><a href="https://cplusplus.github.io/CWG/issues/512.html">512</a></td>
@@ -10895,7 +10895,7 @@ and <I>POD class</I></td>
</tr>
<tr class="open" id="1845">
<td><a href="https://cplusplus.github.io/CWG/issues/1845.html">1845</a></td>
- <td>drafting</td>
+ <td>review</td>
<td>Point of instantiation of a variable template specialization</td>
<td align="center">Not resolved</td>
</tr>
@@ -12081,7 +12081,7 @@ and <I>POD class</I></td>
</tr>
<tr class="open" id="2042">
<td><a href="https://cplusplus.github.io/CWG/issues/2042.html">2042</a></td>
- <td>drafting</td>
+ <td>review</td>
<td>Exceptions and deallocation functions</td>
<td align="center">Not resolved</td>
</tr>
@@ -12335,7 +12335,7 @@ and <I>POD class</I></td>
<td><a href="https://cplusplus.github.io/CWG/issues/2084.html">2084</a></td>
<td>CD4</td>
<td>NSDMIs and deleted union default constructors</td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="full" align="center">Clang 3.1</td>
</tr>
<tr id="2085">
<td><a href="https://cplusplus.github.io/CWG/issues/2085.html">2085</a></td>
@@ -12837,7 +12837,7 @@ and <I>POD class</I></td>
</tr>
<tr class="open" id="2168">
<td><a href="https://cplusplus.github.io/CWG/issues/2168.html">2168</a></td>
- <td>open</td>
+ <td>review</td>
<td>Narrowing conversions and +/- infinity</td>
<td align="center">Not resolved</td>
</tr>
@@ -14237,11 +14237,11 @@ and <I>POD class</I></td>
<td>Constexpr virtual functions and temporary objects</td>
<td class="unknown" align="center">Unknown</td>
</tr>
- <tr class="open" id="2401">
+ <tr id="2401">
<td><a href="https://cplusplus.github.io/CWG/issues/2401.html">2401</a></td>
- <td>drafting</td>
+ <td>C++20</td>
<td>Array decay vs prohibition of subobject non-type arguments</td>
- <td align="center">Not resolved</td>
+ <td class="unknown" align="center">Unknown</td>
</tr>
<tr id="2402">
<td><a href="https://cplusplus.github.io/CWG/issues/2402.html">2402</a></td>
@@ -15171,7 +15171,7 @@ and <I>POD class</I></td>
</tr>
<tr class="open" id="2555">
<td><a href="https://cplusplus.github.io/CWG/issues/2555.html">2555</a></td>
- <td>drafting</td>
+ <td>tentatively ready</td>
<td>Ineffective redeclaration prevention for <I>using-declarator</I>s</td>
<td align="center">Not resolved</td>
</tr>
@@ -15311,23 +15311,23 @@ and <I>POD class</I></td>
<td>Undefined behavior for preprocessing directives in macro arguments</td>
<td align="center">Not resolved</td>
</tr>
- <tr class="open" id="2578">
+ <tr id="2578">
<td><a href="https://cplusplus.github.io/CWG/issues/2578.html">2578</a></td>
- <td>open</td>
+ <td>CD7</td>
<td>Undefined behavior when creating an invalid string literal via stringizing</td>
- <td align="center">Not resolved</td>
+ <td class="unknown" align="center">Unknown</td>
</tr>
- <tr class="open" id="2579">
+ <tr id="2579">
<td><a href="https://cplusplus.github.io/CWG/issues/2579.html">2579</a></td>
- <td>open</td>
+ <td>CD7</td>
<td>Undefined behavior when token pasting does not create a preprocessing token</td>
- <td align="center">Not resolved</td>
+ <td class="unknown" align="center">Unknown</td>
</tr>
- <tr class="open" id="2580">
+ <tr id="2580">
<td><a href="https://cplusplus.github.io/CWG/issues/2580.html">2580</a></td>
- <td>open</td>
+ <td>CD7</td>
<td>Undefined behavior with <TT>#line</TT></td>
- <td align="center">Not resolved</td>
+ <td class="unknown" align="center">Unknown</td>
</tr>
<tr class="open" id="2581">
<td><a href="https://cplusplus.github.io/CWG/issues/2581.html">2581</a></td>
@@ -17104,7 +17104,7 @@ objects</td>
</tr>
<tr class="open" id="2875">
<td><a href="https://cplusplus.github.io/CWG/issues/2875.html">2875</a></td>
- <td>review</td>
+ <td>tentatively ready</td>
<td>Missing support for round-tripping null pointer values through indirection/address operators</td>
<td align="center">Not resolved</td>
</tr>
@@ -17400,7 +17400,7 @@ objects</td>
</tr>
<tr class="open" id="2923">
<td><a href="https://cplusplus.github.io/CWG/issues/2923.html">2923</a></td>
- <td>review</td>
+ <td>tentatively ready</td>
<td>Note about infinite loops and execution steps</td>
<td align="center">Not resolved</td>
</tr>
@@ -17760,7 +17760,7 @@ objects</td>
</tr>
<tr class="open" id="2983">
<td><a href="https://cplusplus.github.io/CWG/issues/2983.html">2983</a></td>
- <td>open</td>
+ <td>review</td>
<td>Non-type template parameters are not variables</td>
<td align="center">Not resolved</td>
</tr>
@@ -17868,7 +17868,7 @@ objects</td>
</tr>
<tr class="open" id="3001">
<td><a href="https://cplusplus.github.io/CWG/issues/3001.html">3001</a></td>
- <td>review</td>
+ <td>tentatively ready</td>
<td>Inconsistent restrictions for <TT>static_cast</TT> on pointers to out-of-lifetime objects</td>
<td align="center">Not resolved</td>
</tr>
@@ -17932,7 +17932,7 @@ objects</td>
</tr>
<tr class="open" id="3011">
<td><a href="https://cplusplus.github.io/CWG/issues/3011.html">3011</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Parenthesized aggregate initialization for <I>new-expression</I>s</td>
<td align="center">Not resolved</td>
</tr>
@@ -17992,7 +17992,7 @@ objects</td>
</tr>
<tr class="open" id="3021">
<td><a href="https://cplusplus.github.io/CWG/issues/3021.html">3021</a></td>
- <td>open</td>
+ <td>drafting</td>
<td>Subsumption rules for fold expanded constraints</td>
<td align="center">Not resolved</td>
</tr>
@@ -18058,7 +18058,7 @@ objects</td>
</tr>
<tr class="open" id="3032">
<td><a href="https://cplusplus.github.io/CWG/issues/3032.html">3032</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Template argument disambiguation</td>
<td align="center">Not resolved</td>
</tr>
@@ -18184,7 +18184,7 @@ objects</td>
</tr>
<tr class="open" id="3053">
<td><a href="https://cplusplus.github.io/CWG/issues/3053.html">3053</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Allowing <TT>#undef likely</TT></td>
<td align="center">Not resolved</td>
</tr>
@@ -18265,6 +18265,210 @@ objects</td>
<td>tentatively ready</td>
<td>Declarative <I>nested-name-specifier</I> in explicit instantiation</td>
<td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3067">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3067.html">3067</a></td>
+ <td>open</td>
+ <td>Array-to-pointer conversion with object type mismatch</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3068">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3068.html">3068</a></td>
+ <td>open</td>
+ <td>Access checking in friends involving <I>qualified-id</I>s</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3069">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3069.html">3069</a></td>
+ <td>open</td>
+ <td>Reference to wrong placeholder</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3070">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3070.html">3070</a></td>
+ <td>open</td>
+ <td>Trivial assignment can skip member subobjects</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3071">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3071.html">3071</a></td>
+ <td>open</td>
+ <td>Negative <TT>tuple_size</TT> in structured bindings</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3072">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3072.html">3072</a></td>
+ <td>open</td>
+ <td>Incorrect examples for lambda SFINAE</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3073">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3073.html">3073</a></td>
+ <td>open</td>
+ <td>Dependence of <I>R</I> on <TT>T2</TT> is unclear</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3074">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3074.html">3074</a></td>
+ <td>tentatively ready</td>
+ <td>Redundant ill-formedness for module macros</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3075">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3075.html">3075</a></td>
+ <td>tentatively ready</td>
+ <td>Unclear matching of import directive</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3076">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3076.html">3076</a></td>
+ <td>tentatively ready</td>
+ <td>Remove unnecessary IFNDR for malformed <I>header-name-token</I>s</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3077">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3077.html">3077</a></td>
+ <td>tentatively ready</td>
+ <td>Undesirable formation of <TT>import</TT> directive with <I>string-literal</I></td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3078">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3078.html">3078</a></td>
+ <td>review</td>
+ <td>Different treatment of <TT>#include</TT> <I>pp-tokens</I> and <I>header-name-tokens</I></td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3079">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3079.html">3079</a></td>
+ <td>open</td>
+ <td>Allow <I>empty-declaration</I>s in anonymous unions</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3080">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3080.html">3080</a></td>
+ <td>tentatively ready</td>
+ <td>Clarify kinds of permitted template template arguments</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3081">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3081.html">3081</a></td>
+ <td>review</td>
+ <td>Require glvalue when splicing direct base class relationship</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3082">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3082.html">3082</a></td>
+ <td>tentatively ready</td>
+ <td>Allow for call-compatible function types in <TT>reinterpret_cast</TT></td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3083">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3083.html">3083</a></td>
+ <td>tentatively ready</td>
+ <td>Remove redundant restrictions on class and enum definitions</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3084">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3084.html">3084</a></td>
+ <td>tentatively ready</td>
+ <td><I>compound-statement</I>s inside <I>iteration-statement</I>s</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3085">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3085.html">3085</a></td>
+ <td>tentatively ready</td>
+ <td>Apply restriction inside for-range-declaration</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3086">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3086.html">3086</a></td>
+ <td>tentatively ready</td>
+ <td>Destringizing should consider all sorts of encoding-prefixes</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3087">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3087.html">3087</a></td>
+ <td>open</td>
+ <td>Destringizing for raw string literals</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3088">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3088.html">3088</a></td>
+ <td>open</td>
+ <td>Clarify macro treatment of identifiers with special meaning</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3089">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3089.html">3089</a></td>
+ <td>tentatively ready</td>
+ <td>const-default-constructible improperly handles std::meta::info</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3090">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3090.html">3090</a></td>
+ <td>tentatively ready</td>
+ <td>Internal linkage from header units</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3091">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3091.html">3091</a></td>
+ <td>review</td>
+ <td>Linking of translation units as sequences of tokens</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3092">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3092.html">3092</a></td>
+ <td>tentatively ready</td>
+ <td><I>base-specifier</I>s are not "declared"</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3093">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3093.html">3093</a></td>
+ <td>open</td>
+ <td>Missing integration of direct base class relationships</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3094">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3094.html">3094</a></td>
+ <td>review</td>
+ <td>Rework phases for string literal concatenation and token formation</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3095">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3095.html">3095</a></td>
+ <td>open</td>
+ <td>Type-dependent packs that are not structured binding packs</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3096">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3096.html">3096</a></td>
+ <td>open</td>
+ <td>Value-dependence of size of structured binding pack with non-dependent initializer</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3097">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3097.html">3097</a></td>
+ <td>tentatively ready</td>
+ <td>Lambda expression introduces a scope</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3098">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3098.html">3098</a></td>
+ <td>tentatively ready</td>
+ <td>Remove redundancy "names or designates"</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3099">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3099.html">3099</a></td>
+ <td>open</td>
+ <td>Instantiation of type aliases from alias templates is unspecified</td>
+ <td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="3100">
+ <td><a href="https://cplusplus.github.io/CWG/issues/3100.html">3100</a></td>
+ <td>open</td>
+ <td>Destruction order for objects with static storage duration</td>
+ <td align="center">Not resolved</td>
</tr></table>
</div>
diff --git a/compiler-rt/test/asan/TestCases/log-path_test.cpp b/compiler-rt/test/asan/TestCases/log-path_test.cpp
index 3c5ca11..6875d57 100644
--- a/compiler-rt/test/asan/TestCases/log-path_test.cpp
+++ b/compiler-rt/test/asan/TestCases/log-path_test.cpp
@@ -25,7 +25,8 @@
// RUN: FileCheck %s --check-prefix=CHECK-BAD-DIR < %t.out
// Too long log_path.
-// RUN: %env_asan_opts=log_path=`for((i=0;i<10000;i++)); do echo -n $i; done` \
+// RUN: %python -c "for i in range(0, 10000): print(i, end='')" > %t.long_log_path
+// RUN: %env_asan_opts=log_path=%{readfile:%t.long_log_path} \
// RUN: not %run %t 2> %t.out
// RUN: FileCheck %s --check-prefix=CHECK-LONG < %t.out
diff --git a/compiler-rt/test/asan/TestCases/scariness_score_test.cpp b/compiler-rt/test/asan/TestCases/scariness_score_test.cpp
index 9e55e33..5d229cf 100644
--- a/compiler-rt/test/asan/TestCases/scariness_score_test.cpp
+++ b/compiler-rt/test/asan/TestCases/scariness_score_test.cpp
@@ -6,7 +6,7 @@
// RUN: %clangxx_asan -O0 -mllvm -asan-use-stack-safety=0 %s -o %t
// On OSX and Windows, alloc_dealloc_mismatch=1 isn't 100% reliable, so it's
// off by default. It's safe for these tests, though, so we turn it on.
-// RUN: export %env_asan_opts=symbolize=0:detect_stack_use_after_return=1:handle_abort=1:print_scariness=1:alloc_dealloc_mismatch=1
+// RUN: %export_asan_opts=symbolize=0:detect_stack_use_after_return=1:handle_abort=1:print_scariness=1:alloc_dealloc_mismatch=1
// Make sure the stack is limited (may not be the default under GNU make)
// RUN: ulimit -s 4096
// RUN: not %run %t 1 2>&1 | FileCheck %s --check-prefix=CHECK1
@@ -41,7 +41,7 @@
// RUN: %clangxx_asan -O0 %s -o %t -fsanitize-address-use-after-return=always -mllvm -asan-use-stack-safety=0
// On OSX and Windows, alloc_dealloc_mismatch=1 isn't 100% reliable, so it's
// off by default. It's safe for these tests, though, so we turn it on.
-// RUN: export %env_asan_opts=symbolize=0:handle_abort=1:print_scariness=1:alloc_dealloc_mismatch=1
+// RUN: %export_asan_opts=symbolize=0:handle_abort=1:print_scariness=1:alloc_dealloc_mismatch=1
// Make sure the stack is limited (may not be the default under GNU make)
// RUN: ulimit -s 4096
// RUN: not %run %t 1 2>&1 | FileCheck %s --check-prefix=CHECK1
diff --git a/compiler-rt/test/asan/lit.cfg.py b/compiler-rt/test/asan/lit.cfg.py
index 96201e6..0194c720 100644
--- a/compiler-rt/test/asan/lit.cfg.py
+++ b/compiler-rt/test/asan/lit.cfg.py
@@ -41,6 +41,9 @@ if default_asan_opts_str:
config.substitutions.append(
("%env_asan_opts=", "env ASAN_OPTIONS=" + default_asan_opts_str)
)
+config.substitutions.append(
+ ("%export_asan_opts=", "export ASAN_OPTIONS=" + default_asan_opts_str)
+)
# Setup source root.
config.test_source_root = os.path.dirname(__file__)
diff --git a/compiler-rt/test/lit.common.cfg.py b/compiler-rt/test/lit.common.cfg.py
index 8d14705..9d2f021 100644
--- a/compiler-rt/test/lit.common.cfg.py
+++ b/compiler-rt/test/lit.common.cfg.py
@@ -1066,3 +1066,5 @@ if config.compiler_id == "GNU":
# llvm.
config.substitutions.append(("%crt_src", config.compiler_rt_src_root))
config.substitutions.append(("%llvm_src", config.llvm_src_root))
+
+config.substitutions.append(("%python", '"%s"' % (sys.executable)))
diff --git a/compiler-rt/test/profile/Linux/instrprof-debug-info-correlate-warnings.c b/compiler-rt/test/profile/Linux/instrprof-debug-info-correlate-warnings.c
index 5069c63..25022f2 100644
--- a/compiler-rt/test/profile/Linux/instrprof-debug-info-correlate-warnings.c
+++ b/compiler-rt/test/profile/Linux/instrprof-debug-info-correlate-warnings.c
@@ -1,6 +1,6 @@
// Disable full debug info and verify that we get warnings during merging
-// RUN: %clang_pgogen -o %t -gline-tables-only -mllvm --debug-info-correlate -mllvm --disable-vp=true %S/../Inputs/instrprof-debug-info-correlate-main.cpp %S/../Inputs/instrprof-debug-info-correlate-foo.cpp
+// RUN: %clang_pgogen -o %t -gline-tables-only -mllvm --profile-correlate=debug-info -mllvm --disable-vp=true %S/../Inputs/instrprof-debug-info-correlate-main.cpp %S/../Inputs/instrprof-debug-info-correlate-foo.cpp
// RUN: env LLVM_PROFILE_FILE=%t.proflite %run %t
// RUN: llvm-profdata merge -o %t.profdata --debug-info=%t %t.proflite --max-debug-info-correlation-warnings=2 2>&1 >/dev/null | FileCheck %s --check-prefixes=CHECK,LIMIT --implicit-check-not=warning
// RUN: llvm-profdata merge -o %t.profdata --debug-info=%t %t.proflite --max-debug-info-correlation-warnings=0 2>&1 >/dev/null | FileCheck %s --check-prefixes=CHECK,NOLIMIT --implicit-check-not=warning
diff --git a/flang/include/flang/Lower/OpenMP/Clauses.h b/flang/include/flang/Lower/OpenMP/Clauses.h
index 7492466..688d017 100644
--- a/flang/include/flang/Lower/OpenMP/Clauses.h
+++ b/flang/include/flang/Lower/OpenMP/Clauses.h
@@ -294,6 +294,7 @@ using Permutation = tomp::clause::PermutationT<TypeTy, IdTy, ExprTy>;
using TaskReduction = tomp::clause::TaskReductionT<TypeTy, IdTy, ExprTy>;
using ThreadLimit = tomp::clause::ThreadLimitT<TypeTy, IdTy, ExprTy>;
using Threads = tomp::clause::ThreadsT<TypeTy, IdTy, ExprTy>;
+using Threadset = tomp::clause::ThreadsetT<TypeTy, IdTy, ExprTy>;
using Transparent = tomp::clause::TransparentT<TypeTy, IdTy, ExprTy>;
using To = tomp::clause::ToT<TypeTy, IdTy, ExprTy>;
using UnifiedAddress = tomp::clause::UnifiedAddressT<TypeTy, IdTy, ExprTy>;
diff --git a/flang/include/flang/Parser/dump-parse-tree.h b/flang/include/flang/Parser/dump-parse-tree.h
index bb97069..a7398a4 100644
--- a/flang/include/flang/Parser/dump-parse-tree.h
+++ b/flang/include/flang/Parser/dump-parse-tree.h
@@ -685,6 +685,8 @@ public:
NODE_ENUM(OmpTaskDependenceType, Value)
NODE(parser, OmpTaskReductionClause)
NODE(OmpTaskReductionClause, Modifier)
+ NODE(parser, OmpThreadsetClause)
+ NODE_ENUM(OmpThreadsetClause, ThreadsetPolicy)
NODE(parser, OmpToClause)
NODE(OmpToClause, Modifier)
NODE(parser, OmpTraitProperty)
diff --git a/flang/include/flang/Parser/parse-tree.h b/flang/include/flang/Parser/parse-tree.h
index c3a8c2e..375790a 100644
--- a/flang/include/flang/Parser/parse-tree.h
+++ b/flang/include/flang/Parser/parse-tree.h
@@ -4825,6 +4825,14 @@ struct OmpTaskReductionClause {
std::tuple<MODIFIERS(), OmpObjectList> t;
};
+// Ref: [6.0:442]
+// threadset-clause ->
+// THREADSET(omp_pool|omp_team)
+struct OmpThreadsetClause {
+ ENUM_CLASS(ThreadsetPolicy, Omp_Pool, Omp_Team)
+ WRAPPER_CLASS_BOILERPLATE(OmpThreadsetClause, ThreadsetPolicy);
+};
+
// Ref: [4.5:107-109], [5.0:176-180], [5.1:205-210], [5.2:167-168]
//
// to-clause (in DECLARE TARGET) ->
diff --git a/flang/include/flang/Semantics/dump-expr.h b/flang/include/flang/Semantics/dump-expr.h
index 2dbd4cb..5a78e13 100644
--- a/flang/include/flang/Semantics/dump-expr.h
+++ b/flang/include/flang/Semantics/dump-expr.h
@@ -48,10 +48,11 @@ private:
// "... [with T = xyz; std::string_view = ...]"
#ifdef __clang__
std::string_view front("[T = ");
+ std::string_view back("]");
#else
std::string_view front("[with T = ");
-#endif
std::string_view back("; std::string_view =");
+#endif
#elif defined(_MSC_VER)
#define DUMP_EXPR_SHOW_TYPE
diff --git a/flang/lib/Lower/OpenMP/Clauses.cpp b/flang/lib/Lower/OpenMP/Clauses.cpp
index d39f9dd..0f60b47 100644
--- a/flang/lib/Lower/OpenMP/Clauses.cpp
+++ b/flang/lib/Lower/OpenMP/Clauses.cpp
@@ -1482,6 +1482,21 @@ ThreadLimit make(const parser::OmpClause::ThreadLimit &inp,
return ThreadLimit{/*Threadlim=*/makeExpr(inp.v, semaCtx)};
}
+Threadset make(const parser::OmpClause::Threadset &inp,
+ semantics::SemanticsContext &semaCtx) {
+ // inp.v -> parser::OmpThreadsetClause
+ using wrapped = parser::OmpThreadsetClause;
+
+ CLAUSET_ENUM_CONVERT( //
+ convert, wrapped::ThreadsetPolicy, Threadset::ThreadsetPolicy,
+ // clang-format off
+ MS(Omp_Pool, Omp_Pool)
+ MS(Omp_Team, Omp_Team)
+ // clang-format on
+ );
+ return Threadset{/*ThreadsetPolicy=*/convert(inp.v.v)};
+}
+
// Threadprivate: empty
// Threads: empty
diff --git a/flang/lib/Parser/prescan.cpp b/flang/lib/Parser/prescan.cpp
index 4739da0..fd69404 100644
--- a/flang/lib/Parser/prescan.cpp
+++ b/flang/lib/Parser/prescan.cpp
@@ -557,7 +557,7 @@ bool Prescanner::MustSkipToEndOfLine() const {
return true; // skip over ignored columns in right margin (73:80)
} else if (*at_ == '!' && !inCharLiteral_ &&
(!inFixedForm_ || tabInCurrentLine_ || column_ != 6)) {
- return !IsCompilerDirectiveSentinel(at_);
+ return !IsCompilerDirectiveSentinel(at_ + 1);
} else {
return false;
}
diff --git a/flang/lib/Semantics/check-omp-structure.cpp b/flang/lib/Semantics/check-omp-structure.cpp
index e094458f..aaaf1ec 100644
--- a/flang/lib/Semantics/check-omp-structure.cpp
+++ b/flang/lib/Semantics/check-omp-structure.cpp
@@ -3390,6 +3390,7 @@ CHECK_SIMPLE_CLAUSE(Read, OMPC_read)
CHECK_SIMPLE_CLAUSE(Threadprivate, OMPC_threadprivate)
CHECK_SIMPLE_CLAUSE(Groupprivate, OMPC_groupprivate)
CHECK_SIMPLE_CLAUSE(Threads, OMPC_threads)
+CHECK_SIMPLE_CLAUSE(Threadset, OMPC_threadset)
CHECK_SIMPLE_CLAUSE(Inbranch, OMPC_inbranch)
CHECK_SIMPLE_CLAUSE(Link, OMPC_link)
CHECK_SIMPLE_CLAUSE(Indirect, OMPC_indirect)
diff --git a/flang/test/Parser/inline-directives.f90 b/flang/test/Parser/inline-directives.f90
new file mode 100644
index 0000000..24d4f95
--- /dev/null
+++ b/flang/test/Parser/inline-directives.f90
@@ -0,0 +1,29 @@
+! RUN: %flang_fc1 -fdebug-unparse %s 2>&1 | FileCheck %s
+
+! Test that checks whether compiler directives can be inlined without being mistaken for comments.
+
+module m
+contains
+#define MACRO(X) subroutine func1(X); real(2) :: X; !dir$ ignore_tkr(d) X; end subroutine func1;
+MACRO(foo)
+
+!CHECK: SUBROUTINE func1 (foo)
+!CHECK: !DIR$ IGNORE_TKR (d) foo
+!CHECK: END SUBROUTINE func1
+
+ subroutine func2(foo)
+ real(2) :: foo; !dir$ ignore_tkr(d) foo;
+ end subroutine func2
+
+!CHECK: SUBROUTINE func2 (foo)
+!CHECK: !DIR$ IGNORE_TKR (d) foo
+!CHECK: END SUBROUTINE func2
+
+ subroutine func3(foo)
+ real(2) :: foo; !dir$ ignore_tkr(d) foo; end subroutine func3;
+
+!CHECK: SUBROUTINE func3 (foo)
+!CHECK: !DIR$ IGNORE_TKR (d) foo
+!CHECK: END SUBROUTINE func3
+
+end module
diff --git a/libc/CMakeLists.txt b/libc/CMakeLists.txt
index 14718e2..ae555a25 100644
--- a/libc/CMakeLists.txt
+++ b/libc/CMakeLists.txt
@@ -363,7 +363,7 @@ elseif(LLVM_LIBC_FULL_BUILD)
message(FATAL_ERROR "${LIBC_CONFIG_PATH}/headers.txt file not found and fullbuild requested.")
endif()
-# Check exclude.txt that appends to LIBC_EXCLUDE_ENTRYPOINTS list
+# Check exclude.txt that appends to TARGET_LLVMLIBC_REMOVED_ENTRYPOINTS list
if(EXISTS "${LIBC_CONFIG_PATH}/exclude.txt")
include("${LIBC_CONFIG_PATH}/exclude.txt")
endif()
diff --git a/libc/config/linux/x86_64/exclude.txt b/libc/config/linux/x86_64/exclude.txt
index 2c218b7..a068631 100644
--- a/libc/config/linux/x86_64/exclude.txt
+++ b/libc/config/linux/x86_64/exclude.txt
@@ -19,3 +19,11 @@ if(NOT has_sys_random)
)
endif()
endif()
+
+include(CheckSymbolExists)
+check_symbol_exists(SYS_faccessat2 "sys/syscall.h" HAVE_SYS_FACCESSAT2)
+if(NOT HAVE_SYS_FACCESSAT2)
+ list(APPEND TARGET_LLVMLIBC_REMOVED_ENTRYPOINTS
+ libc.src.unistd.faccessat
+ )
+endif()
diff --git a/libc/include/locale.yaml b/libc/include/locale.yaml
index 4566984..3c3998e 100644
--- a/libc/include/locale.yaml
+++ b/libc/include/locale.yaml
@@ -1,7 +1,7 @@
header: locale.h
header_template: locale.h.def
macros:
- - macro_name: NULL
+ - macro_name: "NULL"
macro_header: null-macro.h
types:
- type_name: locale_t
diff --git a/libc/include/stdio.yaml b/libc/include/stdio.yaml
index 394437b..c50b4ec 100644
--- a/libc/include/stdio.yaml
+++ b/libc/include/stdio.yaml
@@ -1,7 +1,7 @@
header: stdio.h
header_template: stdio.h.def
macros:
- - macro_name: NULL
+ - macro_name: "NULL"
macro_header: null-macro.h
- macro_name: stdout
macro_value: stdout
diff --git a/libc/include/stdlib.yaml b/libc/include/stdlib.yaml
index 3b2ff13..495eb7e 100644
--- a/libc/include/stdlib.yaml
+++ b/libc/include/stdlib.yaml
@@ -5,7 +5,7 @@ standards:
merge_yaml_files:
- stdlib-malloc.yaml
macros:
- - macro_name: NULL
+ - macro_name: "NULL"
macro_header: null-macro.h
types:
- type_name: __atexithandler_t
diff --git a/libc/include/string.yaml b/libc/include/string.yaml
index 0bf297e..22010f4 100644
--- a/libc/include/string.yaml
+++ b/libc/include/string.yaml
@@ -2,7 +2,7 @@ header: string.h
standards:
- stdc
macros:
- - macro_name: NULL
+ - macro_name: "NULL"
macro_header: null-macro.h
types:
- type_name: locale_t
diff --git a/libc/include/time.yaml b/libc/include/time.yaml
index 2f80242..88e50d1 100644
--- a/libc/include/time.yaml
+++ b/libc/include/time.yaml
@@ -1,7 +1,7 @@
header: time.h
header_template: time.h.def
macros:
- - macro_name: NULL
+ - macro_name: "NULL"
macro_header: null-macro.h
types:
- type_name: struct_timeval
diff --git a/libc/include/wchar.yaml b/libc/include/wchar.yaml
index b8a0a74..c8b9e21 100644
--- a/libc/include/wchar.yaml
+++ b/libc/include/wchar.yaml
@@ -1,7 +1,7 @@
header: wchar.h
header_template: wchar.h.def
macros:
- - macro_name: NULL
+ - macro_name: "NULL"
macro_header: null-macro.h
types:
- type_name: FILE
@@ -188,8 +188,8 @@ functions:
standards:
- stdc
return_type: wchar_t *
- arguments:
- - type: wchar_t *__restrict
+ arguments:
+ - type: wchar_t *__restrict
- type: const wchar_t *__restrict
- type: size_t
- name: wmemmove
@@ -212,7 +212,7 @@ functions:
standards:
- stdc
return_type: wchar_t *
- arguments:
+ arguments:
- type: wchar_t *__restrict
- type: const wchar_t *__restrict
- name: wcslcat
diff --git a/libc/src/time/strftime.cpp b/libc/src/time/strftime.cpp
index f36091b..89b7d9b 100644
--- a/libc/src/time/strftime.cpp
+++ b/libc/src/time/strftime.cpp
@@ -26,7 +26,7 @@ LLVM_LIBC_FUNCTION(size_t, strftime,
int ret = strftime_core::strftime_main(&writer, format, timeptr);
if (buffsz > 0) // if the buffsz is 0 the buffer may be a null pointer.
wb.buff[wb.buff_cur] = '\0';
- return (ret < 0 || static_cast<size_t>(ret) > buffsz) ? 0 : ret;
+ return (ret < 0 || static_cast<size_t>(ret) >= buffsz) ? 0 : ret;
}
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/time/strftime_l.cpp b/libc/src/time/strftime_l.cpp
index 201b85d..409f868 100644
--- a/libc/src/time/strftime_l.cpp
+++ b/libc/src/time/strftime_l.cpp
@@ -29,7 +29,7 @@ LLVM_LIBC_FUNCTION(size_t, strftime_l,
int ret = strftime_core::strftime_main(&writer, format, timeptr);
if (buffsz > 0) // if the buffsz is 0 the buffer may be a null pointer.
wb.buff[wb.buff_cur] = '\0';
- return (ret < 0 || static_cast<size_t>(ret) > buffsz) ? 0 : ret;
+ return (ret < 0 || static_cast<size_t>(ret) >= buffsz) ? 0 : ret;
}
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/test/src/time/strftime_test.cpp b/libc/test/src/time/strftime_test.cpp
index cac7560..38176f7 100644
--- a/libc/test/src/time/strftime_test.cpp
+++ b/libc/test/src/time/strftime_test.cpp
@@ -2326,3 +2326,23 @@ TEST(LlvmLibcStrftimeTest, TimeFormatFullDateTime) {
// size_t written = 0;
// SimplePaddedNum spn;
// }
+
+TEST(LlvmLibcStrftimeTest, BufferTooSmall) {
+ struct tm time;
+ char buffer[1];
+
+ time.tm_year = get_adjusted_year(2025);
+ time.tm_mon = 10;
+ time.tm_mday = 24;
+
+ size_t written =
+ LIBC_NAMESPACE::strftime(buffer, sizeof(buffer), "%F", &time);
+ EXPECT_EQ(written, size_t{0});
+
+ char buffer2[10];
+
+ // The string "2025-11-24" is 10 chars,
+ // so strftime needs 10 + 1 bytes to write the string and the null terminator.
+  written = LIBC_NAMESPACE::strftime(buffer2, sizeof(buffer2), "%F", &time);
+ EXPECT_EQ(written, size_t{0});
+}
diff --git a/libc/utils/hdrgen/hdrgen/enumeration.py b/libc/utils/hdrgen/hdrgen/enumeration.py
index 19872082..1e0f64a 100644
--- a/libc/utils/hdrgen/hdrgen/enumeration.py
+++ b/libc/utils/hdrgen/hdrgen/enumeration.py
@@ -6,24 +6,14 @@
#
# ==-------------------------------------------------------------------------==#
-from functools import total_ordering
+from hdrgen.symbol import Symbol
-@total_ordering
-class Enumeration:
+class Enumeration(Symbol):
def __init__(self, name, value):
- self.name = name
+ super().__init__(name)
self.value = value
- def __eq__(self, other):
- return self.name == other.name
-
- def __lt__(self, other):
- return self.name < other.name
-
- def __hash__(self):
- return self.name.__hash__()
-
def __str__(self):
if self.value != None:
return f"{self.name} = {self.value}"
diff --git a/libc/utils/hdrgen/hdrgen/function.py b/libc/utils/hdrgen/hdrgen/function.py
index f039996..4de3406 100644
--- a/libc/utils/hdrgen/hdrgen/function.py
+++ b/libc/utils/hdrgen/hdrgen/function.py
@@ -7,7 +7,7 @@
# ==-------------------------------------------------------------------------==#
import re
-from functools import total_ordering
+from hdrgen.symbol import Symbol
from hdrgen.type import Type
@@ -37,14 +37,13 @@ KEYWORDS = [
NONIDENTIFIER = re.compile("[^a-zA-Z0-9_]+")
-@total_ordering
-class Function:
+class Function(Symbol):
def __init__(
self, return_type, name, arguments, standards, guard=None, attributes=[]
):
+ super().__init__(name)
assert return_type
self.return_type = return_type
- self.name = name
self.arguments = [
arg if isinstance(arg, str) else arg["type"] for arg in arguments
]
@@ -53,15 +52,6 @@ class Function:
self.guard = guard
self.attributes = attributes or []
- def __eq__(self, other):
- return self.name == other.name
-
- def __lt__(self, other):
- return self.name < other.name
-
- def __hash__(self):
- return self.name.__hash__()
-
def signature_types(self):
def collapse(type_string):
assert type_string
diff --git a/libc/utils/hdrgen/hdrgen/header.py b/libc/utils/hdrgen/hdrgen/header.py
index 715d4b7..f592327 100644
--- a/libc/utils/hdrgen/hdrgen/header.py
+++ b/libc/utils/hdrgen/hdrgen/header.py
@@ -35,6 +35,13 @@ NONIDENTIFIER = re.compile("[^a-zA-Z0-9_]+")
COMMON_HEADER = PurePosixPath("__llvm-libc-common.h")
+# These "attributes" are known macros defined in COMMON_HEADER.
+# Others are found in "llvm-libc-macros/{name}.h".
+COMMON_ATTRIBUTES = {
+ "_Noreturn",
+ "_Returns_twice",
+}
+
# All the canonical identifiers are in lowercase for easy maintenance.
# This maps them to the pretty descriptions to generate in header comments.
LIBRARY_DESCRIPTIONS = {
@@ -50,9 +57,7 @@ LIBRARY_DESCRIPTIONS = {
HEADER_TEMPLATE = """\
//===-- {library} header <{header}> --===//
//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+{license_lines}
//
//===---------------------------------------------------------------------===//
@@ -64,6 +69,12 @@ HEADER_TEMPLATE = """\
#endif // {guard}
"""
+LLVM_LICENSE_TEXT = [
+ "Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.",
+ "See https://llvm.org/LICENSE.txt for license information.",
+ "SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception",
+]
+
class HeaderFile:
def __init__(self, name):
@@ -74,8 +85,10 @@ class HeaderFile:
self.enumerations = []
self.objects = []
self.functions = []
+ self.extra_standards = {}
self.standards = []
self.merge_yaml_files = []
+ self.license_text = []
def add_macro(self, macro):
self.macros.append(macro)
@@ -98,6 +111,11 @@ class HeaderFile:
self.enumerations = sorted(set(self.enumerations) | set(other.enumerations))
self.objects = sorted(set(self.objects) | set(other.objects))
self.functions = sorted(set(self.functions) | set(other.functions))
+ self.extra_standards |= other.extra_standards
+ if self.license_text:
+ assert not other.license_text, "only one `license_text` allowed"
+ else:
+ self.license_text = other.license_text
def all_types(self):
return reduce(
@@ -106,6 +124,13 @@ class HeaderFile:
set(self.types),
)
+ def all_attributes(self):
+ return reduce(
+ lambda a, b: a | b,
+ [set(f.attributes) for f in self.functions],
+ set(),
+ )
+
def all_standards(self):
# FIXME: Only functions have the "standard" field, but all the entity
# types should have one too.
@@ -114,16 +139,24 @@ class HeaderFile:
)
def includes(self):
- return {
- PurePosixPath("llvm-libc-macros") / macro.header
- for macro in self.macros
- if macro.header is not None
- } | {
- COMPILER_HEADER_TYPES.get(
- typ.type_name, PurePosixPath("llvm-libc-types") / f"{typ.type_name}.h"
- )
- for typ in self.all_types()
- }
+ return (
+ {
+ PurePosixPath("llvm-libc-macros") / macro.header
+ for macro in self.macros
+ if macro.header is not None
+ }
+ | {
+ COMPILER_HEADER_TYPES.get(
+ typ.name,
+ PurePosixPath("llvm-libc-types") / f"{typ.name}.h",
+ )
+ for typ in self.all_types()
+ }
+ | {
+ PurePosixPath("llvm-libc-macros") / f"{attr}.h"
+ for attr in self.all_attributes() - COMMON_ATTRIBUTES
+ }
+ )
def header_guard(self):
return "_LLVM_LIBC_" + "_".join(
@@ -131,24 +164,29 @@ class HeaderFile:
)
def library_description(self):
+ descriptions = LIBRARY_DESCRIPTIONS | self.extra_standards
# If the header itself is in standard C, just call it that.
if "stdc" in self.standards:
- return LIBRARY_DESCRIPTIONS["stdc"]
+ return descriptions["stdc"]
# If the header itself is in POSIX, just call it that.
if "posix" in self.standards:
- return LIBRARY_DESCRIPTIONS["posix"]
+ return descriptions["posix"]
# Otherwise, consider the standards for each symbol as well.
standards = self.all_standards()
# Otherwise, it's described by all those that apply, but ignoring
# "stdc" and "posix" since this is not a "stdc" or "posix" header.
return " / ".join(
sorted(
- LIBRARY_DESCRIPTIONS[standard]
+ descriptions[standard]
for standard in standards
if standard not in {"stdc", "posix"}
)
)
+ def license_lines(self):
+ lines = self.license_text or LLVM_LICENSE_TEXT
+ return "\n".join([f"// {line}" for line in lines])
+
def template(self, dir, files_read):
if self.template_file is not None:
# There's a custom template file, so just read it in and record
@@ -162,6 +200,7 @@ class HeaderFile:
library=self.library_description(),
header=self.name,
guard=self.header_guard(),
+ license_lines=self.license_lines(),
)
def public_api(self):
@@ -188,7 +227,7 @@ class HeaderFile:
)
]
- for macro in self.macros:
+ for macro in sorted(self.macros):
# When there is nothing to define, the Macro object converts to str
# as an empty string. Don't emit a blank line for those cases.
if str(macro):
@@ -203,7 +242,12 @@ class HeaderFile:
content.append("\n__BEGIN_C_DECLS\n")
current_guard = None
- for function in self.functions:
+ last_name = None
+ for function in sorted(self.functions):
+ # If the last function's name was the same after underscores,
+ # elide the blank line between the declarations.
+ if last_name == function.name_without_underscores():
+ content.pop()
if function.guard == None and current_guard == None:
content.append(str(function) + " __NOEXCEPT;")
content.append("")
@@ -225,6 +269,7 @@ class HeaderFile:
content.append(f"#ifdef {current_guard}")
content.append(str(function) + " __NOEXCEPT;")
content.append("")
+ last_name = function.name_without_underscores()
if current_guard != None:
content.pop()
content.append(f"#endif // {current_guard}")
diff --git a/libc/utils/hdrgen/hdrgen/macro.py b/libc/utils/hdrgen/hdrgen/macro.py
index e42e828..4664d9f 100644
--- a/libc/utils/hdrgen/hdrgen/macro.py
+++ b/libc/utils/hdrgen/hdrgen/macro.py
@@ -6,25 +6,15 @@
#
# ==-------------------------------------------------------------------------==#
-from functools import total_ordering
+from hdrgen.symbol import Symbol
-@total_ordering
-class Macro:
+class Macro(Symbol):
def __init__(self, name, value=None, header=None):
- self.name = name
+ super().__init__(name)
self.value = value
self.header = header
- def __eq__(self, other):
- return self.name == other.name
-
- def __lt__(self, other):
- return self.name < other.name
-
- def __hash__(self):
- return self.name.__hash__()
-
def __str__(self):
if self.header != None:
return ""
diff --git a/libc/utils/hdrgen/hdrgen/main.py b/libc/utils/hdrgen/hdrgen/main.py
index 25df41e..c12e89e 100755
--- a/libc/utils/hdrgen/hdrgen/main.py
+++ b/libc/utils/hdrgen/hdrgen/main.py
@@ -105,6 +105,7 @@ def main():
return 2
header.merge(merge_from_header)
+ assert header.name, f"`header: name.h` line is required in {yaml_file}"
return header
if args.json:
diff --git a/libc/utils/hdrgen/hdrgen/object.py b/libc/utils/hdrgen/hdrgen/object.py
index a311c37..a2ab496b 100644
--- a/libc/utils/hdrgen/hdrgen/object.py
+++ b/libc/utils/hdrgen/hdrgen/object.py
@@ -6,23 +6,13 @@
#
# ==-------------------------------------------------------------------------==#
-from functools import total_ordering
+from hdrgen.symbol import Symbol
-@total_ordering
-class Object:
+class Object(Symbol):
def __init__(self, name, type):
- self.name = name
+ super().__init__(name)
self.type = type
- def __eq__(self, other):
- return self.name == other.name
-
- def __lt__(self, other):
- return self.name < other.name
-
- def __hash__(self):
- return self.name.__hash__()
-
def __str__(self):
return f"extern {self.type} {self.name};"
diff --git a/libc/utils/hdrgen/hdrgen/symbol.py b/libc/utils/hdrgen/hdrgen/symbol.py
new file mode 100644
index 0000000..28e9def
--- /dev/null
+++ b/libc/utils/hdrgen/hdrgen/symbol.py
@@ -0,0 +1,41 @@
+# ====-- Symbol class for libc function headers----------------*- python -*--==#
+#
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# ==-------------------------------------------------------------------------==#
+
+from functools import total_ordering
+
+
+@total_ordering
+class Symbol:
+ """
+ Symbol is the common superclass for each kind of entity named by an
+ identifier. It provides the name field, and defines sort ordering,
+ hashing, and equality based only on the name. The sorting is pretty
+ presentation order for identifiers, which is to say it first sorts
+ lexically but ignores leading underscores and secondarily sorts with the
+ fewest underscores first.
+ """
+
+ def __init__(self, name):
+ assert name
+ self.name = name
+
+ def __eq__(self, other):
+ return self.name == other.name
+
+ def __hash__(self):
+ return self.name.__hash__()
+
+ def name_without_underscores(self):
+ return self.name.lstrip("_")
+
+ def name_sort_key(self):
+ ident = self.name_without_underscores()
+ return ident, len(self.name) - len(ident)
+
+ def __lt__(self, other):
+ return self.name_sort_key() < other.name_sort_key()
diff --git a/libc/utils/hdrgen/hdrgen/type.py b/libc/utils/hdrgen/hdrgen/type.py
index 0c0af85..20c1881 100644
--- a/libc/utils/hdrgen/hdrgen/type.py
+++ b/libc/utils/hdrgen/hdrgen/type.py
@@ -6,20 +6,10 @@
#
# ==-------------------------------------------------------------------------==#
-from functools import total_ordering
+from hdrgen.symbol import Symbol
-@total_ordering
-class Type:
- def __init__(self, type_name):
- assert type_name
- self.type_name = type_name
-
- def __eq__(self, other):
- return self.type_name == other.type_name
-
- def __lt__(self, other):
- return self.type_name < other.type_name
-
- def __hash__(self):
- return self.type_name.__hash__()
+class Type(Symbol):
+ # A type so far carries no specific information beyond its name.
+ def __init__(self, name):
+ super().__init__(name)
diff --git a/libc/utils/hdrgen/hdrgen/yaml_to_classes.py b/libc/utils/hdrgen/hdrgen/yaml_to_classes.py
index ebe7781d..9eddbe6 100644
--- a/libc/utils/hdrgen/hdrgen/yaml_to_classes.py
+++ b/libc/utils/hdrgen/hdrgen/yaml_to_classes.py
@@ -37,6 +37,8 @@ def yaml_to_classes(yaml_data, header_class, entry_points=None):
header = header_class(header_name)
header.template_file = yaml_data.get("header_template")
header.standards = yaml_data.get("standards", [])
+ header.extra_standards = yaml_data.get("extra_standards", {})
+ header.license_text = yaml_data.get("license_text", [])
header.merge_yaml_files = yaml_data.get("merge_yaml_files", [])
for macro_data in yaml_data.get("macros", []):
diff --git a/libc/utils/hdrgen/tests/expected_output/custom.h b/libc/utils/hdrgen/tests/expected_output/custom.h
new file mode 100644
index 0000000..5f9ed23
--- /dev/null
+++ b/libc/utils/hdrgen/tests/expected_output/custom.h
@@ -0,0 +1,21 @@
+//===-- Wile E. Coyote header <custom.h> --===//
+//
+// Caveat emptor.
+// I never studied law.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef _LLVM_LIBC_CUSTOM_H
+#define _LLVM_LIBC_CUSTOM_H
+
+#include "__llvm-libc-common.h"
+#include "llvm-libc-types/meep.h"
+#include "llvm-libc-types/road.h"
+
+__BEGIN_C_DECLS
+
+road runner(meep, meep) __NOEXCEPT;
+
+__END_C_DECLS
+
+#endif // _LLVM_LIBC_CUSTOM_H
diff --git a/libc/utils/hdrgen/tests/expected_output/sorting.h b/libc/utils/hdrgen/tests/expected_output/sorting.h
new file mode 100644
index 0000000..a091a42
--- /dev/null
+++ b/libc/utils/hdrgen/tests/expected_output/sorting.h
@@ -0,0 +1,24 @@
+//===-- Standard C header <sorting.h> --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef _LLVM_LIBC_SORTING_H
+#define _LLVM_LIBC_SORTING_H
+
+#include "__llvm-libc-common.h"
+
+__BEGIN_C_DECLS
+
+void func_with_aliases(int) __NOEXCEPT;
+void _func_with_aliases(int) __NOEXCEPT;
+void __func_with_aliases(int) __NOEXCEPT;
+
+void gunk(const char *) __NOEXCEPT;
+
+__END_C_DECLS
+
+#endif // _LLVM_LIBC_SORTING_H
diff --git a/libc/utils/hdrgen/tests/expected_output/test_header.h b/libc/utils/hdrgen/tests/expected_output/test_header.h
index 748c098..49112a3 100644
--- a/libc/utils/hdrgen/tests/expected_output/test_header.h
+++ b/libc/utils/hdrgen/tests/expected_output/test_header.h
@@ -12,6 +12,7 @@
#include "__llvm-libc-common.h"
#include "llvm-libc-macros/float16-macros.h"
+#include "llvm-libc-macros/CONST_FUNC_A.h"
#include "llvm-libc-macros/test_more-macros.h"
#include "llvm-libc-macros/test_small-macros.h"
#include "llvm-libc-types/float128.h"
diff --git a/libc/utils/hdrgen/tests/expected_output/test_small.json b/libc/utils/hdrgen/tests/expected_output/test_small.json
index 9cc73d0..8502df2 100644
--- a/libc/utils/hdrgen/tests/expected_output/test_small.json
+++ b/libc/utils/hdrgen/tests/expected_output/test_small.json
@@ -4,6 +4,7 @@
"standards": [],
"includes": [
"__llvm-libc-common.h",
+ "llvm-libc-macros/CONST_FUNC_A.h",
"llvm-libc-macros/test_more-macros.h",
"llvm-libc-macros/test_small-macros.h",
"llvm-libc-types/float128.h",
diff --git a/libc/utils/hdrgen/tests/input/custom-common.yaml b/libc/utils/hdrgen/tests/input/custom-common.yaml
new file mode 100644
index 0000000..909a3ba
--- /dev/null
+++ b/libc/utils/hdrgen/tests/input/custom-common.yaml
@@ -0,0 +1,6 @@
+license_text:
+ - Caveat emptor.
+ - I never studied law.
+
+extra_standards:
+ acme: Wile E. Coyote
diff --git a/libc/utils/hdrgen/tests/input/custom.yaml b/libc/utils/hdrgen/tests/input/custom.yaml
new file mode 100644
index 0000000..7d3ff8e
--- /dev/null
+++ b/libc/utils/hdrgen/tests/input/custom.yaml
@@ -0,0 +1,13 @@
+merge_yaml_files:
+ - custom-common.yaml
+
+header: custom.h
+standards:
+ - acme
+
+functions:
+ - name: runner
+ return_type: road
+ arguments:
+ - type: meep
+ - type: meep
diff --git a/libc/utils/hdrgen/tests/input/sorting.yaml b/libc/utils/hdrgen/tests/input/sorting.yaml
new file mode 100644
index 0000000..3c26cde
--- /dev/null
+++ b/libc/utils/hdrgen/tests/input/sorting.yaml
@@ -0,0 +1,20 @@
+header: sorting.h
+standards:
+ - stdc
+functions:
+ - name: gunk
+ return_type: void
+ arguments:
+ - type: const char *
+ - name: _func_with_aliases
+ return_type: void
+ arguments:
+ - type: int
+ - name: func_with_aliases
+ return_type: void
+ arguments:
+ - type: int
+ - name: __func_with_aliases
+ return_type: void
+ arguments:
+ - type: int
diff --git a/libc/utils/hdrgen/tests/test_integration.py b/libc/utils/hdrgen/tests/test_integration.py
index bf393d2..b975d8f 100644
--- a/libc/utils/hdrgen/tests/test_integration.py
+++ b/libc/utils/hdrgen/tests/test_integration.py
@@ -59,6 +59,13 @@ class TestHeaderGenIntegration(unittest.TestCase):
self.run_script(yaml_file, output_file)
self.compare_files(output_file, expected_output_file)
+ def test_custom_license_and_standards(self):
+ yaml_file = self.source_dir / "input" / "custom.yaml"
+ expected_output_file = self.source_dir / "expected_output" / "custom.h"
+ output_file = self.output_dir / "custom.h"
+ self.run_script(yaml_file, output_file)
+ self.compare_files(output_file, expected_output_file)
+
def test_generate_json(self):
yaml_file = self.source_dir / "input/test_small.yaml"
expected_output_file = self.source_dir / "expected_output/test_small.json"
@@ -68,6 +75,13 @@ class TestHeaderGenIntegration(unittest.TestCase):
self.compare_files(output_file, expected_output_file)
+ def test_sorting(self):
+ yaml_file = self.source_dir / "input" / "sorting.yaml"
+ expected_output_file = self.source_dir / "expected_output" / "sorting.h"
+ output_file = self.output_dir / "sorting.h"
+ self.run_script(yaml_file, output_file)
+ self.compare_files(output_file, expected_output_file)
+
def main():
parser = argparse.ArgumentParser(description="TestHeaderGenIntegration arguments")
diff --git a/libcxx/include/__config b/libcxx/include/__config
index b4c081d..357f77b 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -1050,8 +1050,7 @@ typedef __char32_t char32_t;
# define _LIBCPP_CTAD_SUPPORTED_FOR_TYPE(_ClassName) static_assert(true, "")
# endif
-// TODO(LLVM 22): Remove the workaround
-# if defined(__OBJC__) && (!defined(_LIBCPP_CLANG_VER) || _LIBCPP_CLANG_VER < 2001)
+# if defined(__OBJC__) && defined(_LIBCPP_APPLE_CLANG_VER)
# define _LIBCPP_WORKAROUND_OBJCXX_COMPILER_INTRINSICS
# endif
@@ -1255,14 +1254,6 @@ typedef __char32_t char32_t;
# define _LIBCPP_DIAGNOSE_NULLPTR
# endif
-// TODO(LLVM 22): Remove this macro once LLVM19 support ends. __cpp_explicit_this_parameter has been set in LLVM20.
-// Clang-18 has support for deducing this, but it does not set the FTM.
-# if defined(__cpp_explicit_this_parameter) || (defined(_LIBCPP_CLANG_VER) && _LIBCPP_CLANG_VER >= 1800)
-# define _LIBCPP_HAS_EXPLICIT_THIS_PARAMETER 1
-# else
-# define _LIBCPP_HAS_EXPLICIT_THIS_PARAMETER 0
-# endif
-
#endif // __cplusplus
#endif // _LIBCPP___CONFIG
diff --git a/libcxx/include/__configuration/abi.h b/libcxx/include/__configuration/abi.h
index c9936df..38b85c6 100644
--- a/libcxx/include/__configuration/abi.h
+++ b/libcxx/include/__configuration/abi.h
@@ -61,14 +61,6 @@
// According to the Standard, `bitset::operator[] const` returns bool
# define _LIBCPP_ABI_BITSET_VECTOR_BOOL_CONST_SUBSCRIPT_RETURN_BOOL
-// In LLVM 20, we've changed to take these ABI breaks unconditionally. These flags only exist in case someone is running
-// into the static_asserts we added to catch the ABI break and don't care that it is one.
-// TODO(LLVM 22): Remove these flags
-# define _LIBCPP_ABI_LIST_REMOVE_NODE_POINTER_UB
-# define _LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB
-# define _LIBCPP_ABI_FIX_UNORDERED_NODE_POINTER_UB
-# define _LIBCPP_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB
-
// These flags are documented in ABIGuarantees.rst
# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT
# define _LIBCPP_ABI_DO_NOT_EXPORT_BASIC_STRING_COMMON
diff --git a/libcxx/include/__format/format_arg.h b/libcxx/include/__format/format_arg.h
index ed5e762..19794f0 100644
--- a/libcxx/include/__format/format_arg.h
+++ b/libcxx/include/__format/format_arg.h
@@ -149,7 +149,7 @@ _LIBCPP_HIDE_FROM_ABI decltype(auto) __visit_format_arg(_Visitor&& __vis, basic_
__libcpp_unreachable();
}
-# if _LIBCPP_STD_VER >= 26 && _LIBCPP_HAS_EXPLICIT_THIS_PARAMETER
+# if _LIBCPP_STD_VER >= 26
template <class _Rp, class _Visitor, class _Context>
_LIBCPP_HIDE_FROM_ABI _Rp __visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) {
@@ -200,7 +200,7 @@ _LIBCPP_HIDE_FROM_ABI _Rp __visit_format_arg(_Visitor&& __vis, basic_format_arg<
__libcpp_unreachable();
}
-# endif // _LIBCPP_STD_VER >= 26 && _LIBCPP_HAS_EXPLICIT_THIS_PARAMETER
+# endif // _LIBCPP_STD_VER >= 26
/// Contains the values used in basic_format_arg.
///
@@ -285,7 +285,7 @@ public:
_LIBCPP_HIDE_FROM_ABI explicit operator bool() const noexcept { return __type_ != __format::__arg_t::__none; }
-# if _LIBCPP_STD_VER >= 26 && _LIBCPP_HAS_EXPLICIT_THIS_PARAMETER
+# if _LIBCPP_STD_VER >= 26
// This function is user facing, so it must wrap the non-standard types of
// the "variant" in a handle to stay conforming. See __arg_t for more details.
@@ -329,7 +329,7 @@ public:
}
}
-# endif // _LIBCPP_STD_VER >= 26 && _LIBCPP_HAS_EXPLICIT_THIS_PARAMETER
+# endif // _LIBCPP_STD_VER >= 26
private:
using char_type = typename _Context::char_type;
@@ -371,11 +371,8 @@ private:
// This function is user facing, so it must wrap the non-standard types of
// the "variant" in a handle to stay conforming. See __arg_t for more details.
template <class _Visitor, class _Context>
-# if _LIBCPP_STD_VER >= 26 && _LIBCPP_HAS_EXPLICIT_THIS_PARAMETER
-_LIBCPP_DEPRECATED_IN_CXX26
-# endif
- _LIBCPP_HIDE_FROM_ABI decltype(auto)
- visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) {
+_LIBCPP_DEPRECATED_IN_CXX26 _LIBCPP_HIDE_FROM_ABI decltype(auto)
+visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) {
switch (__arg.__type_) {
# if _LIBCPP_HAS_INT128
case __format::__arg_t::__i128: {
@@ -387,7 +384,7 @@ _LIBCPP_DEPRECATED_IN_CXX26
typename __basic_format_arg_value<_Context>::__handle __h{__arg.__value_.__u128_};
return std::invoke(std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__h});
}
-# endif // _LIBCPP_STD_VER >= 26 && _LIBCPP_HAS_EXPLICIT_THIS_PARAMETER
+# endif // _LIBCPP_HAS_INT128
default:
return std::__visit_format_arg(std::forward<_Visitor>(__vis), __arg);
}
diff --git a/libcxx/include/__format/format_context.h b/libcxx/include/__format/format_context.h
index e672ee7..1771dd3 100644
--- a/libcxx/include/__format/format_context.h
+++ b/libcxx/include/__format/format_context.h
@@ -175,13 +175,13 @@ public:
__format::__determine_arg_t<basic_format_context, decltype(__arg)>(),
__basic_format_arg_value<basic_format_context>(__arg)};
};
-# if _LIBCPP_STD_VER >= 26 && _LIBCPP_HAS_EXPLICIT_THIS_PARAMETER
+# if _LIBCPP_STD_VER >= 26
return static_cast<_Context*>(__c)->arg(__id).visit(std::move(__visitor));
# else
_LIBCPP_SUPPRESS_DEPRECATED_PUSH
return std::visit_format_arg(std::move(__visitor), static_cast<_Context*>(__c)->arg(__id));
_LIBCPP_SUPPRESS_DEPRECATED_POP
-# endif // _LIBCPP_STD_VER >= 26 && _LIBCPP_HAS_EXPLICIT_THIS_PARAMETER
+# endif // _LIBCPP_STD_VER >= 26
}) {
}
diff --git a/libcxx/include/__hash_table b/libcxx/include/__hash_table
index 5432abb..e189794 100644
--- a/libcxx/include/__hash_table
+++ b/libcxx/include/__hash_table
@@ -83,18 +83,6 @@ struct __hash_node_base {
typedef _NodePtr __node_pointer;
typedef __node_base_pointer __next_pointer;
-// TODO(LLVM 22): Remove this check
-#ifndef _LIBCPP_ABI_FIX_UNORDERED_NODE_POINTER_UB
- static_assert(sizeof(__node_base_pointer) == sizeof(__node_pointer) && _LIBCPP_ALIGNOF(__node_base_pointer) ==
- _LIBCPP_ALIGNOF(__node_pointer),
- "It looks like you are using std::__hash_table (an implementation detail for the unordered containers) "
- "with a fancy pointer type that thas a different representation depending on whether it points to a "
- "__hash_table base pointer or a __hash_table node pointer (both of which are implementation details of "
- "the standard library). This means that your ABI is being broken between LLVM 19 and LLVM 20. If you "
- "don't care about your ABI being broken, define the _LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB macro to "
- "silence this diagnostic.");
-#endif
-
__next_pointer __next_;
_LIBCPP_HIDE_FROM_ABI __next_pointer __ptr() _NOEXCEPT {
diff --git a/libcxx/include/__iterator/concepts.h b/libcxx/include/__iterator/concepts.h
index f386887..3b43920 100644
--- a/libcxx/include/__iterator/concepts.h
+++ b/libcxx/include/__iterator/concepts.h
@@ -117,15 +117,12 @@ template <class _Tp>
concept __signed_integer_like = signed_integral<_Tp>;
template <class _Ip>
-concept weakly_incrementable =
- // TODO: remove this once the clang bug is fixed (https://llvm.org/PR48173).
- !same_as<_Ip, bool> && // Currently, clang does not handle bool correctly.
- movable<_Ip> && requires(_Ip __i) {
- typename iter_difference_t<_Ip>;
- requires __signed_integer_like<iter_difference_t<_Ip>>;
- { ++__i } -> same_as<_Ip&>; // not required to be equality-preserving
- __i++; // not required to be equality-preserving
- };
+concept weakly_incrementable = movable<_Ip> && requires(_Ip __i) {
+ typename iter_difference_t<_Ip>;
+ requires __signed_integer_like<iter_difference_t<_Ip>>;
+ { ++__i } -> same_as<_Ip&>; // not required to be equality-preserving
+ __i++; // not required to be equality-preserving
+};
// [iterator.concept.inc]
template <class _Ip>
diff --git a/libcxx/include/__math/traits.h b/libcxx/include/__math/traits.h
index 00db2a8..ff22cee 100644
--- a/libcxx/include/__math/traits.h
+++ b/libcxx/include/__math/traits.h
@@ -25,33 +25,26 @@ namespace __math {
// signbit
-// TODO(LLVM 22): Remove conditional once support for Clang 19 is dropped.
-#if defined(_LIBCPP_COMPILER_GCC) || __has_constexpr_builtin(__builtin_signbit)
-# define _LIBCPP_SIGNBIT_CONSTEXPR _LIBCPP_CONSTEXPR_SINCE_CXX23
-#else
-# define _LIBCPP_SIGNBIT_CONSTEXPR
-#endif
-
// The universal C runtime (UCRT) in the WinSDK provides floating point overloads
// for std::signbit(). By defining our overloads as templates, we can work around
// this issue as templates are less preferred than non-template functions.
template <class = void>
-[[__nodiscard__]] inline _LIBCPP_SIGNBIT_CONSTEXPR _LIBCPP_HIDE_FROM_ABI bool signbit(float __x) _NOEXCEPT {
+[[__nodiscard__]] inline _LIBCPP_CONSTEXPR_SINCE_CXX23 _LIBCPP_HIDE_FROM_ABI bool signbit(float __x) _NOEXCEPT {
return __builtin_signbit(__x);
}
template <class = void>
-[[__nodiscard__]] inline _LIBCPP_SIGNBIT_CONSTEXPR _LIBCPP_HIDE_FROM_ABI bool signbit(double __x) _NOEXCEPT {
+[[__nodiscard__]] inline _LIBCPP_CONSTEXPR_SINCE_CXX23 _LIBCPP_HIDE_FROM_ABI bool signbit(double __x) _NOEXCEPT {
return __builtin_signbit(__x);
}
template <class = void>
-[[__nodiscard__]] inline _LIBCPP_SIGNBIT_CONSTEXPR _LIBCPP_HIDE_FROM_ABI bool signbit(long double __x) _NOEXCEPT {
+[[__nodiscard__]] inline _LIBCPP_CONSTEXPR_SINCE_CXX23 _LIBCPP_HIDE_FROM_ABI bool signbit(long double __x) _NOEXCEPT {
return __builtin_signbit(__x);
}
template <class _A1, __enable_if_t<is_integral<_A1>::value, int> = 0>
-[[__nodiscard__]] inline _LIBCPP_SIGNBIT_CONSTEXPR _LIBCPP_HIDE_FROM_ABI bool signbit(_A1 __x) _NOEXCEPT {
+[[__nodiscard__]] inline _LIBCPP_CONSTEXPR_SINCE_CXX23 _LIBCPP_HIDE_FROM_ABI bool signbit(_A1 __x) _NOEXCEPT {
return __x < 0;
}
diff --git a/libcxx/include/__ranges/transform_view.h b/libcxx/include/__ranges/transform_view.h
index ae85dfa..ab1adf9 100644
--- a/libcxx/include/__ranges/transform_view.h
+++ b/libcxx/include/__ranges/transform_view.h
@@ -13,7 +13,6 @@
#include <__compare/three_way_comparable.h>
#include <__concepts/constructible.h>
#include <__concepts/convertible_to.h>
-#include <__concepts/copyable.h>
#include <__concepts/derived_from.h>
#include <__concepts/equality_comparable.h>
#include <__concepts/invocable.h>
@@ -64,7 +63,7 @@ concept __regular_invocable_with_range_ref = regular_invocable<_Fn, range_refere
template <class _View, class _Fn>
concept __transform_view_constraints =
view<_View> && is_object_v<_Fn> && regular_invocable<_Fn&, range_reference_t<_View>> &&
- __is_referenceable_v<invoke_result_t<_Fn&, range_reference_t<_View>>>;
+ __referenceable<invoke_result_t<_Fn&, range_reference_t<_View>>>;
# if _LIBCPP_STD_VER >= 23
template <input_range _View, move_constructible _Fn>
diff --git a/libcxx/include/__tree b/libcxx/include/__tree
index 0738c8c..6947969 100644
--- a/libcxx/include/__tree
+++ b/libcxx/include/__tree
@@ -823,18 +823,6 @@ public:
using __node_allocator _LIBCPP_NODEBUG = __rebind_alloc<__alloc_traits, __node>;
using __node_traits _LIBCPP_NODEBUG = allocator_traits<__node_allocator>;
-// TODO(LLVM 22): Remove this check
-#ifndef _LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB
- static_assert(sizeof(__node_base_pointer) == sizeof(__end_node_pointer) && _LIBCPP_ALIGNOF(__node_base_pointer) ==
- _LIBCPP_ALIGNOF(__end_node_pointer),
- "It looks like you are using std::__tree (an implementation detail for (multi)map/set) with a fancy "
- "pointer type that thas a different representation depending on whether it points to a __tree base "
- "pointer or a __tree node pointer (both of which are implementation details of the standard library). "
- "This means that your ABI is being broken between LLVM 19 and LLVM 20. If you don't care about your "
- "ABI being broken, define the _LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB macro to silence this "
- "diagnostic.");
-#endif
-
private:
// check for sane allocator pointer rebinding semantics. Rebinding the
// allocator for a new pointer type should be exactly the same as rebinding
diff --git a/libcxx/include/__type_traits/reference_constructs_from_temporary.h b/libcxx/include/__type_traits/reference_constructs_from_temporary.h
index 2ff549b..3d097ce 100644
--- a/libcxx/include/__type_traits/reference_constructs_from_temporary.h
+++ b/libcxx/include/__type_traits/reference_constructs_from_temporary.h
@@ -30,14 +30,8 @@ _LIBCPP_NO_SPECIALIZATIONS inline constexpr bool reference_constructs_from_tempo
#endif
-#if __has_builtin(__reference_constructs_from_temporary)
template <class _Tp, class _Up>
inline const bool __reference_constructs_from_temporary_v = __reference_constructs_from_temporary(_Tp, _Up);
-#else
-// TODO(LLVM 22): Remove this as all supported compilers should have __reference_constructs_from_temporary implemented.
-template <class _Tp, class _Up>
-inline const bool __reference_constructs_from_temporary_v = __reference_binds_to_temporary(_Tp, _Up);
-#endif
_LIBCPP_END_NAMESPACE_STD
diff --git a/libcxx/include/forward_list b/libcxx/include/forward_list
index df7da20..88d863f 100644
--- a/libcxx/include/forward_list
+++ b/libcxx/include/forward_list
@@ -284,17 +284,6 @@ struct __forward_node_traits {
typedef _NodePtr __node_pointer;
typedef __forward_begin_node<_NodePtr> __begin_node;
typedef __rebind_pointer_t<_NodePtr, __begin_node> __begin_node_pointer;
-
-// TODO(LLVM 22): Remove this check
-# ifndef _LIBCPP_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB
- static_assert(sizeof(__begin_node_pointer) == sizeof(__node_pointer) && _LIBCPP_ALIGNOF(__begin_node_pointer) ==
- _LIBCPP_ALIGNOF(__node_pointer),
- "It looks like you are using std::forward_list with a fancy pointer type that thas a different "
- "representation depending on whether it points to a forward_list base pointer or a forward_list node "
- "pointer (both of which are implementation details of the standard library). This means that your ABI "
- "is being broken between LLVM 19 and LLVM 20. If you don't care about your ABI being broken, define "
- "the _LIBCPP_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB macro to silence this diagnostic.");
-# endif
};
template <class _NodePtr>
diff --git a/libcxx/include/list b/libcxx/include/list
index c5c2a85..0ff85d2 100644
--- a/libcxx/include/list
+++ b/libcxx/include/list
@@ -276,17 +276,6 @@ template <class _Tp, class _VoidPtr>
struct __list_node_pointer_traits {
typedef __rebind_pointer_t<_VoidPtr, __list_node<_Tp, _VoidPtr> > __node_pointer;
typedef __rebind_pointer_t<_VoidPtr, __list_node_base<_Tp, _VoidPtr> > __base_pointer;
-
-// TODO(LLVM 22): Remove this check
-# ifndef _LIBCPP_ABI_LIST_REMOVE_NODE_POINTER_UB
- static_assert(sizeof(__node_pointer) == sizeof(__node_pointer) && _LIBCPP_ALIGNOF(__base_pointer) ==
- _LIBCPP_ALIGNOF(__node_pointer),
- "It looks like you are using std::list with a fancy pointer type that thas a different representation "
- "depending on whether it points to a list base pointer or a list node pointer (both of which are "
- "implementation details of the standard library). This means that your ABI is being broken between "
- "LLVM 19 and LLVM 20. If you don't care about your ABI being broken, define the "
- "_LIBCPP_ABI_LIST_REMOVE_NODE_POINTER_UB macro to silence this diagnostic.");
-# endif
};
template <class _Tp, class _VoidPtr>
diff --git a/libcxx/include/tuple b/libcxx/include/tuple
index 5f3bb72..466f501 100644
--- a/libcxx/include/tuple
+++ b/libcxx/include/tuple
@@ -301,7 +301,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 bool __tuple_compare_equal(c
template <class _Tp, class _Up, class _IndexSeq = make_index_sequence<tuple_size_v<_Tp>>>
inline constexpr bool __can_tuple_compare_equal = false;
-// TODO(LLVM 22): Remove `tuple_size_v<_Tp> == tuple_size_v<_Up>` here once once LLVM-20 support ends
+// TODO(LLVM 23): Remove `tuple_size_v<_Tp> == tuple_size_v<_Up>` here once LLVM-20 support ends
// because the resolution of CWG2369 landed in LLVM-21.
template <class _Tp, class _Up, size_t... _Is>
requires(tuple_size_v<_Tp> == tuple_size_v<_Up>)
@@ -328,7 +328,7 @@ concept __tuple_like_no_tuple = __tuple_like<_Tp> && !__is_tuple_v<_Tp>;
template <class _Tp, class _Up, class _IndexSeq>
struct __tuple_common_comparison_category_impl {};
-// TODO(LLVM 22): Remove `tuple_size_v<_Tp> == tuple_size_v<_Up>` here once once LLVM-20 support ends
+// TODO(LLVM 23): Remove `tuple_size_v<_Tp> == tuple_size_v<_Up>` here once LLVM-20 support ends
// because the resolution of CWG2369 landed in LLVM-21.
template <class _Tp, class _Up, size_t... _Is>
requires(tuple_size_v<_Tp> == tuple_size_v<_Up>) && requires {
diff --git a/libcxx/include/variant b/libcxx/include/variant
index 9beef14..8e95858 100644
--- a/libcxx/include/variant
+++ b/libcxx/include/variant
@@ -1299,7 +1299,7 @@ public:
__impl_.__swap(__that.__impl_);
}
-# if _LIBCPP_STD_VER >= 26 && _LIBCPP_HAS_EXPLICIT_THIS_PARAMETER
+# if _LIBCPP_STD_VER >= 26
// Helper class to implement [variant.visit]/10
// Constraints: The call to visit does not use an explicit template-argument-list
// that begins with a type template-argument.
diff --git a/libcxx/test/libcxx-03/utilities/meta/is_referenceable.compile.pass.cpp b/libcxx/test/libcxx-03/utilities/meta/is_referenceable.compile.pass.cpp
index 093bbae..f39d1a5 100644
--- a/libcxx/test/libcxx-03/utilities/meta/is_referenceable.compile.pass.cpp
+++ b/libcxx/test/libcxx-03/utilities/meta/is_referenceable.compile.pass.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
//
-// __is_referenceable_v<Tp>
+// __libcpp_is_referenceable<Tp>
//
// [defns.referenceable] defines "a referenceable type" as:
// An object type, a function type that does not have cv-qualifiers
diff --git a/libcxx/test/libcxx/input.output/iostreams.base/ios.base/ios.base.cons/dtor.uninitialized.pass.cpp b/libcxx/test/libcxx/input.output/iostreams.base/ios.base/ios.base.cons/dtor.uninitialized.pass.cpp
index f17c148..16d66e3 100644
--- a/libcxx/test/libcxx/input.output/iostreams.base/ios.base/ios.base.cons/dtor.uninitialized.pass.cpp
+++ b/libcxx/test/libcxx/input.output/iostreams.base/ios.base/ios.base.cons/dtor.uninitialized.pass.cpp
@@ -6,14 +6,12 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// UNSUPPORTED: no-exceptions
// The fix for issue 57964 requires an updated dylib due to explicit
// instantiations. That means Apple backdeployment targets remain broken.
-// XFAIL: using-built-library-before-llvm-19
+// TODO: Remove && !darwin once availability markup for LLVM 19 on macOS has been added
+// XFAIL: using-built-library-before-llvm-19 && !darwin
// <ios>
diff --git a/libcxx/test/libcxx/numerics/c.math/constexpr-cxx23-clang.pass.cpp b/libcxx/test/libcxx/numerics/c.math/constexpr-cxx23-clang.pass.cpp
index 3f17f21e..20887b8 100644
--- a/libcxx/test/libcxx/numerics/c.math/constexpr-cxx23-clang.pass.cpp
+++ b/libcxx/test/libcxx/numerics/c.math/constexpr-cxx23-clang.pass.cpp
@@ -220,16 +220,9 @@ int main(int, char**) {
ASSERT_CONSTEXPR_CXX23(std::isnormal(-1.0) == 1);
ASSERT_CONSTEXPR_CXX23(std::isnormal(-1.0L) == 1);
-// TODO(LLVM 22): Remove `__has_constexpr_builtin` conditional once support for Clang 19 is dropped.
-#if !__has_constexpr_builtin(__builtin_signbit)
- ASSERT_NOT_CONSTEXPR_CXX23(std::signbit(-1.0f) == 1);
- ASSERT_NOT_CONSTEXPR_CXX23(std::signbit(-1.0) == 1);
- ASSERT_NOT_CONSTEXPR_CXX23(std::signbit(-1.0L) == 1);
-#else
ASSERT_CONSTEXPR_CXX23(std::signbit(-1.0f) == 1);
ASSERT_CONSTEXPR_CXX23(std::signbit(-1.0) == 1);
ASSERT_CONSTEXPR_CXX23(std::signbit(-1.0L) == 1);
-#endif
ASSERT_NOT_CONSTEXPR_CXX23(std::isgreater(-1.0f, 0.0f) == 0);
ASSERT_NOT_CONSTEXPR_CXX23(std::isgreater(-1.0, 0.0) == 0);
diff --git a/libcxx/test/libcxx/utilities/expected/expected.expected/transform_error.mandates.verify.cpp b/libcxx/test/libcxx/utilities/expected/expected.expected/transform_error.mandates.verify.cpp
index 09ebd00..3e9bdd9 100644
--- a/libcxx/test/libcxx/utilities/expected/expected.expected/transform_error.mandates.verify.cpp
+++ b/libcxx/test/libcxx/utilities/expected/expected.expected/transform_error.mandates.verify.cpp
@@ -8,15 +8,6 @@
// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20
-// With clang-cl, some warnings have a 'which is a Microsoft extension' suffix
-// which break the tests. But #102851 will turn it into an error, making the test pass.
-// However, upstream libcxx buildbots do not build clang from source while testing, so
-// this tests still expected to fail on these bots.
-//
-// TODO(LLVM 22): Remove '0-1' from 'expected-error-re@*:* 0-1 {{union member {{.*}} has reference type {{.*}}}}'
-// and remove 'expected-warning-re@*:* 0-1 {{union member {{.*}} has reference type {{.*}}, which is a Microsoft extension}}'
-// once LLVM 22 releases. See https://llvm.org/PR104885.
-
// Test the mandates
// template<class F> constexpr auto transform_error(F&& f) &;
@@ -55,41 +46,39 @@ void test() {
{
std::expected<int, int> e;
e.transform_error(return_unexpected<int&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-1 {{no matching constructor for initialization of{{.*}}}}
// expected-error-re@*:* {{static assertion failed {{.*}}[expected.object.general] A program that instantiates the definition of template expected<T, E> for {{.*}} is ill-formed.}}
- // expected-error-re@*:* 0-1 {{union member {{.*}} has reference type {{.*}}}}
+ // expected-error-re@*:* {{union member {{.*}} has reference type {{.*}}}}
e.transform_error(return_no_object<int&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-1 {{no matching constructor for initialization of{{.*}}}}
+ // expected-error-re@*:* {{no matching constructor for initialization of{{.*}}}}
// expected-error-re@*:* {{static assertion failed {{.*}}[expected.object.general] A program that instantiates the definition of template expected<T, E> for {{.*}} is ill-formed.}}
- // expected-warning-re@*:* 0-1 {{union member {{.*}} has reference type {{.*}}, which is a Microsoft extension}}
}
// Test const& overload
{
const std::expected<int, int> e;
e.transform_error(return_unexpected<const int &>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-2 {{no matching constructor for initialization of{{.*}}}}
+ // expected-error-re@*:* {{no matching constructor for initialization of{{.*}}}}
e.transform_error(return_no_object<const int &>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-2 {{no matching constructor for initialization of{{.*}}}}
+ // expected-error-re@*:* {{no matching constructor for initialization of{{.*}}}}
}
// Test && overload
{
std::expected<int, int> e;
std::move(e).transform_error(return_unexpected<int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-2 {{no matching constructor for initialization of{{.*}}}}
+ // expected-error-re@*:* {{no matching constructor for initialization of{{.*}}}}
std::move(e).transform_error(return_no_object<int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-2 {{no matching constructor for initialization of{{.*}}}}
+ // expected-error-re@*:* {{no matching constructor for initialization of{{.*}}}}
}
// Test const&& overload
{
const std::expected<int, int> e;
std::move(e).transform_error(return_unexpected<const int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-2 {{no matching constructor for initialization of{{.*}}}}
+ // expected-error-re@*:* {{no matching constructor for initialization of{{.*}}}}
std::move(e).transform_error(return_no_object<const int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-2 {{no matching constructor for initialization of{{.*}}}}
+ // expected-error-re@*:* {{no matching constructor for initialization of{{.*}}}}
}
}
// clang-format on
diff --git a/libcxx/test/libcxx/utilities/expected/expected.void/transform_error.mandates.verify.cpp b/libcxx/test/libcxx/utilities/expected/expected.void/transform_error.mandates.verify.cpp
index 9fd7452..c5acc27 100644
--- a/libcxx/test/libcxx/utilities/expected/expected.void/transform_error.mandates.verify.cpp
+++ b/libcxx/test/libcxx/utilities/expected/expected.void/transform_error.mandates.verify.cpp
@@ -8,16 +8,6 @@
// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20
-// With clang-cl, some warnings have a 'which is a Microsoft extension' suffix
-// which break the tests. But #102851 will turn it into an error, making the test pass.
-// However, upstream libcxx buildbots do not build clang from source while testing, so
-// this tests still expected to fail on these bots.
-//
-// TODO(LLVM 22): Remove '0-1' from 'expected-error-re@*:* 0-1 {{union member {{.*}} has reference type {{.*}}}}'
-// and remove 'expected-warning-re@*:* 0-1 {{union member {{.*}} has reference type {{.*}}, which is a Microsoft extension}}'
-// and remove 'expected-error-re@*:* 0-1 {{call to deleted constructor of {{.*}}}}'
-// once LLVM 22 releases. See See https://llvm.org/PR104885.
-
// Test the mandates
// template<class F> constexpr auto transform_error(F&& f) &;
@@ -56,43 +46,36 @@ void test() {
{
std::expected<void, int> e;
e.transform_error(return_unexpected<int&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-1 {{no matching constructor for initialization of{{.*}}}}
+ // expected-error-re@*:* {{no matching constructor for initialization of{{.*}}}}
// expected-error-re@*:* {{static assertion failed {{.*}}A program that instantiates expected<T, E> with a E that is not a valid argument for unexpected<E> is ill-formed}}
- // expected-error-re@*:* 0-1 {{call to deleted constructor of {{.*}}}}
- // expected-error-re@*:* 0-1 {{union member {{.*}} has reference type {{.*}}}}
+ // expected-error-re@*:* {{union member {{.*}} has reference type {{.*}}}}
e.transform_error(return_no_object<int&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-1 {{no matching constructor for initialization of{{.*}}}}
+ // expected-error-re@*:* {{no matching constructor for initialization of{{.*}}}}
// expected-error-re@*:* {{static assertion failed {{.*}}A program that instantiates expected<T, E> with a E that is not a valid argument for unexpected<E> is ill-formed}}
- // expected-warning-re@*:* 0-1 {{union member {{.*}} has reference type {{.*}}, which is a Microsoft extension}}
}
// Test const& overload
{
const std::expected<void, int> e;
e.transform_error(return_unexpected<const int &>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-1 {{no matching constructor for initialization of{{.*}}}}
+ // expected-error-re@*:* {{no matching constructor for initialization of{{.*}}}}
e.transform_error(return_no_object<const int &>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-1 {{no matching constructor for initialization of{{.*}}}}
- // expected-error-re@*:* 0-1 {{call to deleted constructor of {{.*}}}}
+ // expected-error-re@*:* {{no matching constructor for initialization of{{.*}}}}
}
// Test && overload
{
std::expected<void, int> e;
std::move(e).transform_error(return_unexpected<int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-1 {{no matching constructor for initialization of{{.*}}}}
std::move(e).transform_error(return_no_object<int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-1 {{no matching constructor for initialization of{{.*}}}}
}
// Test const&& overload
{
const std::expected<void, int> e;
std::move(e).transform_error(return_unexpected<const int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-1 {{no matching constructor for initialization of{{.*}}}}
std::move(e).transform_error(return_no_object<const int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 0-1 {{no matching constructor for initialization of{{.*}}}}
}
}
// clang-format on
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/filebuf.virtuals/setbuf.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/filebuf.virtuals/setbuf.pass.cpp
index 9d14abc..00aa97a 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/filebuf.virtuals/setbuf.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/filebuf.virtuals/setbuf.pass.cpp
@@ -6,16 +6,14 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// <fstream>
// basic_streambuf<charT, traits>* setbuf(char_type* s, streamsize n) override;
// This test requires the fix to https://llvm.org/PR60509 in the dylib,
// which landed in 5afb937d8a30445642ccaf33866ee4cdd0713222.
-// XFAIL: using-built-library-before-llvm-19
+// TODO: Remove && !darwin once availability markup for LLVM 19 on macOS has been added
+// XFAIL: using-built-library-before-llvm-19 && !darwin
#include <fstream>
#include <cstddef>
diff --git a/libcxx/test/std/input.output/iostream.format/input.streams/istream.unformatted/sync.pass.cpp b/libcxx/test/std/input.output/iostream.format/input.streams/istream.unformatted/sync.pass.cpp
index 3b68595..b04d2c0 100644
--- a/libcxx/test/std/input.output/iostream.format/input.streams/istream.unformatted/sync.pass.cpp
+++ b/libcxx/test/std/input.output/iostream.format/input.streams/istream.unformatted/sync.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// <istream>
// int sync();
@@ -16,7 +13,8 @@
// The fix for bug 51497 and bug 51499 require and updated dylib due to
// explicit instantiations. That means Apple backdeployment targets remain
// broken.
-// XFAIL: using-built-library-before-llvm-19
+// TODO: Remove && !darwin once availability markup for LLVM 19 on macOS has been added
+// XFAIL: using-built-library-before-llvm-19 && !darwin
#include <istream>
#include <cassert>
diff --git a/libcxx/test/std/localization/locale.categories/category.collate/locale.collate.byname/compare.pass.cpp b/libcxx/test/std/localization/locale.categories/category.collate/locale.collate.byname/compare.pass.cpp
index 4905ed4..8ae6bc2 100644
--- a/libcxx/test/std/localization/locale.categories/category.collate/locale.collate.byname/compare.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.collate/locale.collate.byname/compare.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// Bionic has minimal locale support, investigate this later.
// XFAIL: LIBCXX-ANDROID-FIXME
@@ -56,14 +53,7 @@ int main(int, char**)
ASSERT_COMPARE(std::string, "AAA", "BBB", -1);
ASSERT_COMPARE(std::string, "bbb", "aaa", 1);
ASSERT_COMPARE(std::string, "ccc", "ccc", 0);
-
-#if defined(__APPLE__)
- // Apple's default collation is case-sensitive
- ASSERT_COMPARE(std::string, "aaaaaaA", "BaaaaaA", 1);
-#else
- // Glibc, Windows, and FreeBSD's default collation is case-insensitive
ASSERT_COMPARE(std::string, "aaaaaaA", "BaaaaaA", -1);
-#endif
}
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
@@ -73,13 +63,7 @@ int main(int, char**)
ASSERT_COMPARE(std::wstring, L"AAA", L"BBB", -1);
ASSERT_COMPARE(std::wstring, L"bbb", L"aaa", 1);
ASSERT_COMPARE(std::wstring, L"ccc", L"ccc", 0);
-#if defined(__APPLE__)
- // Apple's default collation is case-sensitive
- ASSERT_COMPARE(std::wstring, L"aaaaaaA", L"BaaaaaA", 1);
-#else
- // Glibc, Windows, and FreeBSD's default collation is case-insensitive
ASSERT_COMPARE(std::wstring, L"aaaaaaA", L"BaaaaaA", -1);
-#endif
}
#endif
}
diff --git a/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_fr_FR.pass.cpp b/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_fr_FR.pass.cpp
index ea6b079..c9ed59f 100644
--- a/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_fr_FR.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_fr_FR.pass.cpp
@@ -6,11 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
-// XFAIL: darwin
-
// NetBSD does not support LC_MONETARY at the moment
// XFAIL: netbsd
diff --git a/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_ru_RU.pass.cpp b/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_ru_RU.pass.cpp
index f98758d..371cf0e 100644
--- a/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_ru_RU.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_ru_RU.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// NetBSD does not support LC_MONETARY at the moment
// XFAIL: netbsd
diff --git a/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_zh_CN.pass.cpp b/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_zh_CN.pass.cpp
index 6980b7a..c86df7e 100644
--- a/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_zh_CN.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.get/locale.money.get.members/get_long_double_zh_CN.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// NetBSD does not support LC_MONETARY at the moment
// XFAIL: netbsd
@@ -158,7 +155,7 @@ int main(int, char**)
std::noshowbase(ios);
}
{ // negative one, showbase
-#ifdef _AIX
+#if defined(_AIX) || defined(__APPLE__)
std::string v = "-" + currency_symbol + "0.01";
#else
std::string v = currency_symbol + "-0.01";
@@ -172,7 +169,7 @@ int main(int, char**)
assert(ex == -1);
}
{ // negative one, showbase
-#ifdef _AIX
+#if defined(_AIX) || defined(__APPLE__)
std::string v = "-" + currency_symbol + "0.01";
#else
std::string v = currency_symbol + "-0.01";
@@ -212,7 +209,7 @@ int main(int, char**)
std::noshowbase(ios);
}
{ // negative, showbase
-#ifdef _AIX
+#if defined(_AIX) || defined(__APPLE__)
std::string v = "-" + currency_symbol + "1,234,567.89";
#else
std::string v = currency_symbol + "-1,234,567.89";
@@ -333,7 +330,7 @@ int main(int, char**)
std::noshowbase(ios);
}
{ // negative one, showbase
-#if defined(TEST_HAS_GLIBC) || defined(_AIX)
+#if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
std::string v = "-" + currency_name + "0.01";
#else
std::string v = currency_name + "-0.01";
@@ -348,7 +345,7 @@ int main(int, char**)
assert(ex == -1);
}
{ // negative one, showbase
-#if defined(TEST_HAS_GLIBC) || defined(_AIX)
+#if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
std::string v = "-" + currency_name + "0.01";
#else
std::string v = currency_name + "-0.01";
@@ -389,7 +386,7 @@ int main(int, char**)
std::noshowbase(ios);
}
{ // negative, showbase
-#if defined(TEST_HAS_GLIBC) || defined(_AIX)
+#if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
std::string v = "-" + currency_name + "1,234,567.89";
#else
std::string v = currency_name + "-1,234,567.89";
@@ -518,7 +515,7 @@ int main(int, char**)
std::noshowbase(ios);
}
{ // negative one, showbase
-# ifdef _AIX
+# if defined(_AIX) || defined(__APPLE__)
std::wstring v = L"-" + w_currency_symbol + L"0.01";
# else
std::wstring v = w_currency_symbol + L"-0.01";
@@ -532,7 +529,7 @@ int main(int, char**)
assert(ex == -1);
}
{ // negative one, showbase
-# ifdef _AIX
+# if defined(_AIX) || defined(__APPLE__)
std::wstring v = L"-" + w_currency_symbol + L"0.01";
# else
std::wstring v = w_currency_symbol + L"-0.01";
@@ -572,7 +569,7 @@ int main(int, char**)
std::noshowbase(ios);
}
{ // negative, showbase
-# ifdef _AIX
+# if defined(_AIX) || defined(__APPLE__)
std::wstring v = L"-" + w_currency_symbol + L"1,234,567.89";
# else
std::wstring v = w_currency_symbol + L"-1,234,567.89";
@@ -693,7 +690,7 @@ int main(int, char**)
std::noshowbase(ios);
}
{ // negative one, showbase
-# if defined(TEST_HAS_GLIBC) || defined(_AIX)
+# if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
std::wstring v = L"-" + w_currency_name + L"0.01";
# else
std::wstring v = w_currency_name + L"-0.01";
@@ -707,7 +704,7 @@ int main(int, char**)
assert(ex == -1);
}
{ // negative one, showbase
-# if defined(TEST_HAS_GLIBC) || defined(_AIX)
+# if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
std::wstring v = L"-" + w_currency_name + L"0.01";
# else
std::wstring v = w_currency_name + L"-0.01";
@@ -747,7 +744,7 @@ int main(int, char**)
std::noshowbase(ios);
}
{ // negative, showbase
-# if defined(TEST_HAS_GLIBC) || defined(_AIX)
+# if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
std::wstring v = L"-" + w_currency_name + L"1,234,567.89";
# else
std::wstring v = w_currency_name + L"-1,234,567.89";
diff --git a/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_fr_FR.pass.cpp b/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_fr_FR.pass.cpp
index 1474599..f9d7998 100644
--- a/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_fr_FR.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_fr_FR.pass.cpp
@@ -6,11 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
-// XFAIL: darwin
-
// NetBSD does not support LC_MONETARY at the moment
// XFAIL: netbsd
diff --git a/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_ru_RU.pass.cpp b/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_ru_RU.pass.cpp
index 0455e59..be1e397 100644
--- a/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_ru_RU.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_ru_RU.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// NetBSD does not support LC_MONETARY at the moment
// XFAIL: netbsd
diff --git a/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_zh_CN.pass.cpp b/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_zh_CN.pass.cpp
index 68640fa..25046a8 100644
--- a/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_zh_CN.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.monetary/locale.money.put/locale.money.put.members/put_long_double_zh_CN.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// NetBSD does not support LC_MONETARY at the moment
// XFAIL: netbsd
@@ -122,7 +119,7 @@ int main(int, char**)
char str[100];
cpp17_output_iterator<char*> iter = f.put(cpp17_output_iterator<char*>(str), false, ios, '*', v);
std::string ex(str, base(iter));
-#ifdef _AIX
+#if defined(_AIX) || defined(__APPLE__)
assert(ex == "-" + currency_symbol + "0.01");
#else
assert(ex == currency_symbol + "-0.01");
@@ -142,7 +139,7 @@ int main(int, char**)
char str[100];
cpp17_output_iterator<char*> iter = f.put(cpp17_output_iterator<char*>(str), false, ios, '*', v);
std::string ex(str, base(iter));
-#ifdef _AIX
+#if defined(_AIX) || defined(__APPLE__)
assert(ex == "-" + currency_symbol + "1,234,567.89");
#else
assert(ex == currency_symbol + "-1,234,567.89");
@@ -156,7 +153,7 @@ int main(int, char**)
char str[100];
cpp17_output_iterator<char*> iter = f.put(cpp17_output_iterator<char*>(str), false, ios, ' ', v);
std::string ex(str, base(iter));
-#ifdef _AIX
+#if defined(_AIX) || defined(__APPLE__)
assert(ex == "-" + currency_symbol + "1,234,567.89" + currency_symbol_padding);
#else
assert(ex == currency_symbol + "-1,234,567.89" + currency_symbol_padding);
@@ -171,7 +168,7 @@ int main(int, char**)
char str[100];
cpp17_output_iterator<char*> iter = f.put(cpp17_output_iterator<char*>(str), false, ios, ' ', v);
std::string ex(str, base(iter));
-#ifdef _AIX
+#if defined(_AIX) || defined(__APPLE__)
assert(ex == "-" + currency_symbol + currency_symbol_padding + "1,234,567.89");
#else
assert(ex == currency_symbol + "-" + currency_symbol_padding + "1,234,567.89");
@@ -186,7 +183,7 @@ int main(int, char**)
char str[100];
cpp17_output_iterator<char*> iter = f.put(cpp17_output_iterator<char*>(str), false, ios, ' ', v);
std::string ex(str, base(iter));
-#ifdef _AIX
+#if defined(_AIX) || defined(__APPLE__)
assert(ex == currency_symbol_padding + "-" + currency_symbol + "1,234,567.89");
#else
assert(ex == currency_symbol_padding + currency_symbol + "-1,234,567.89");
@@ -239,7 +236,7 @@ int main(int, char**)
char str[100];
cpp17_output_iterator<char*> iter = f.put(cpp17_output_iterator<char*>(str), true, ios, '*', v);
std::string ex(str, base(iter));
-#if defined(TEST_HAS_GLIBC) || defined(_AIX)
+#if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
assert(ex == "-" + currency_name + "0.01");
#else
assert(ex == currency_name + "-0.01");
@@ -259,7 +256,7 @@ int main(int, char**)
char str[100];
cpp17_output_iterator<char*> iter = f.put(cpp17_output_iterator<char*>(str), true, ios, '*', v);
std::string ex(str, base(iter));
-#if defined(TEST_HAS_GLIBC) || defined(_AIX)
+#if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
assert(ex == "-" + currency_name + "1,234,567.89");
#else
assert(ex == currency_name + "-1,234,567.89");
@@ -273,7 +270,7 @@ int main(int, char**)
char str[100];
cpp17_output_iterator<char*> iter = f.put(cpp17_output_iterator<char*>(str), true, ios, ' ', v);
std::string ex(str, base(iter));
-#if defined(TEST_HAS_GLIBC) || defined(_AIX)
+#if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
assert(ex == "-" + currency_name + "1,234,567.89" + currency_name_padding);
#else
assert(ex == currency_name + "-1,234,567.89" + currency_name_padding);
@@ -288,7 +285,7 @@ int main(int, char**)
char str[100];
cpp17_output_iterator<char*> iter = f.put(cpp17_output_iterator<char*>(str), true, ios, ' ', v);
std::string ex(str, base(iter));
-#if defined(TEST_HAS_GLIBC) || defined(_AIX)
+#if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
assert(ex == "-" + currency_name + currency_name_padding + "1,234,567.89");
#else
assert(ex == currency_name + "-" + currency_name_padding + "1,234,567.89");
@@ -303,7 +300,7 @@ int main(int, char**)
char str[100];
cpp17_output_iterator<char*> iter = f.put(cpp17_output_iterator<char*>(str), true, ios, ' ', v);
std::string ex(str, base(iter));
-#if defined(TEST_HAS_GLIBC) || defined(_AIX)
+#if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
assert(ex == currency_name_padding + "-" + currency_name + "1,234,567.89");
#else
assert(ex == currency_name_padding + currency_name + "-1,234,567.89");
@@ -366,7 +363,7 @@ int main(int, char**)
wchar_t str[100];
cpp17_output_iterator<wchar_t*> iter = f.put(cpp17_output_iterator<wchar_t*>(str), false, ios, '*', v);
std::wstring ex(str, base(iter));
-# ifdef _AIX
+# if defined(_AIX) || defined(__APPLE__)
assert(ex == L"-" + currency_symbol + L"0.01");
# else
assert(ex == currency_symbol + L"-0.01");
@@ -386,7 +383,7 @@ int main(int, char**)
wchar_t str[100];
cpp17_output_iterator<wchar_t*> iter = f.put(cpp17_output_iterator<wchar_t*>(str), false, ios, '*', v);
std::wstring ex(str, base(iter));
-# ifdef _AIX
+# if defined(_AIX) || defined(__APPLE__)
assert(ex == L"-" + currency_symbol + L"1,234,567.89");
# else
assert(ex == currency_symbol + L"-1,234,567.89");
@@ -400,7 +397,7 @@ int main(int, char**)
wchar_t str[100];
cpp17_output_iterator<wchar_t*> iter = f.put(cpp17_output_iterator<wchar_t*>(str), false, ios, ' ', v);
std::wstring ex(str, base(iter));
-# ifdef _AIX
+# if defined(_AIX) || defined(__APPLE__)
assert(ex == L"-" + currency_symbol + L"1,234,567.89 ");
# else
assert(ex == currency_symbol + L"-1,234,567.89 ");
@@ -415,7 +412,7 @@ int main(int, char**)
wchar_t str[100];
cpp17_output_iterator<wchar_t*> iter = f.put(cpp17_output_iterator<wchar_t*>(str), false, ios, ' ', v);
std::wstring ex(str, base(iter));
-# ifdef _AIX
+# if defined(_AIX) || defined(__APPLE__)
assert(ex == L"-" + currency_symbol + L" 1,234,567.89");
# else
assert(ex == currency_symbol + L"- 1,234,567.89");
@@ -430,7 +427,7 @@ int main(int, char**)
wchar_t str[100];
cpp17_output_iterator<wchar_t*> iter = f.put(cpp17_output_iterator<wchar_t*>(str), false, ios, ' ', v);
std::wstring ex(str, base(iter));
-# ifdef _AIX
+# if defined(_AIX) || defined(__APPLE__)
assert(ex == L" -" + currency_symbol + L"1,234,567.89");
# else
assert(ex == L" " + currency_symbol + L"-1,234,567.89");
@@ -483,7 +480,7 @@ int main(int, char**)
wchar_t str[100];
cpp17_output_iterator<wchar_t*> iter = f.put(cpp17_output_iterator<wchar_t*>(str), true, ios, '*', v);
std::wstring ex(str, base(iter));
-# if defined(TEST_HAS_GLIBC) || defined(_AIX)
+# if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
assert(ex == L"-" + currency_name + L"0.01");
#else
assert(ex == currency_name + L"-0.01");
@@ -503,7 +500,7 @@ int main(int, char**)
wchar_t str[100];
cpp17_output_iterator<wchar_t*> iter = f.put(cpp17_output_iterator<wchar_t*>(str), true, ios, '*', v);
std::wstring ex(str, base(iter));
-# if defined(TEST_HAS_GLIBC) || defined(_AIX)
+# if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
assert(ex == L"-" + currency_name + L"1,234,567.89");
#else
assert(ex == currency_name + L"-1,234,567.89");
@@ -517,7 +514,7 @@ int main(int, char**)
wchar_t str[100];
cpp17_output_iterator<wchar_t*> iter = f.put(cpp17_output_iterator<wchar_t*>(str), true, ios, ' ', v);
std::wstring ex(str, base(iter));
-# if defined(TEST_HAS_GLIBC) || defined(_AIX)
+# if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
assert(ex == L"-" + currency_name + L"1,234,567.89" + currency_name_padding);
#else
assert(ex == currency_name + L"-1,234,567.89" + currency_name_padding);
@@ -532,7 +529,7 @@ int main(int, char**)
wchar_t str[100];
cpp17_output_iterator<wchar_t*> iter = f.put(cpp17_output_iterator<wchar_t*>(str), true, ios, ' ', v);
std::wstring ex(str, base(iter));
-# if defined(TEST_HAS_GLIBC) || defined(_AIX)
+# if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
assert(ex == L"-" + currency_name + currency_name_padding + L"1,234,567.89");
#else
assert(ex == currency_name + L"-" + currency_name_padding + L"1,234,567.89");
@@ -547,7 +544,7 @@ int main(int, char**)
wchar_t str[100];
cpp17_output_iterator<wchar_t*> iter = f.put(cpp17_output_iterator<wchar_t*>(str), true, ios, ' ', v);
std::wstring ex(str, base(iter));
-# if defined(TEST_HAS_GLIBC) || defined(_AIX)
+# if defined(TEST_HAS_GLIBC) || defined(_AIX) || defined(__APPLE__)
assert(ex == currency_name_padding + L"-" + currency_name + L"1,234,567.89");
#else
assert(ex == currency_name_padding + currency_name + L"-1,234,567.89");
diff --git a/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/curr_symbol.pass.cpp b/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/curr_symbol.pass.cpp
index 9c1253d..e7f0f29 100644
--- a/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/curr_symbol.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/curr_symbol.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// NetBSD does not support LC_MONETARY at the moment
// XFAIL: netbsd
@@ -117,11 +114,7 @@ int main(int, char**)
{
Fnf f(LOCALE_fr_FR_UTF_8, 1);
-#ifdef __APPLE__
- assert(f.curr_symbol() == " Eu");
-#else
assert(f.curr_symbol() == " \u20ac");
-#endif
}
{
Fnt f(LOCALE_fr_FR_UTF_8, 1);
@@ -130,11 +123,7 @@ int main(int, char**)
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
Fwf f(LOCALE_fr_FR_UTF_8, 1);
-#ifdef __APPLE__
- assert(f.curr_symbol() == L" Eu");
-#else
assert(f.curr_symbol() == L" \u20ac");
-#endif
}
{
Fwt f(LOCALE_fr_FR_UTF_8, 1);
@@ -164,7 +153,7 @@ int main(int, char**)
{
Fnf f(LOCALE_zh_CN_UTF_8, 1);
-#ifdef _WIN32
+#if defined(_WIN32) || defined(__APPLE__)
assert(f.curr_symbol() == "\xC2\xA5"); // \u00A5
#else
assert(f.curr_symbol() == "\xEF\xBF\xA5"); // \uFFE5
@@ -177,7 +166,7 @@ int main(int, char**)
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
Fwf f(LOCALE_zh_CN_UTF_8, 1);
-#ifdef _WIN32
+#if defined(_WIN32) || defined(__APPLE__)
assert(f.curr_symbol() == L"\u00A5");
#else
assert(f.curr_symbol() == L"\uFFE5");
diff --git a/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/grouping.pass.cpp b/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/grouping.pass.cpp
index 630b273..90dc6c4 100644
--- a/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/grouping.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/grouping.pass.cpp
@@ -6,11 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
-// XFAIL: darwin
-//
// NetBSD does not support LC_MONETARY at the moment
// XFAIL: netbsd
diff --git a/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/neg_format.pass.cpp b/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/neg_format.pass.cpp
index a3e3d853..e952814 100644
--- a/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/neg_format.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/neg_format.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// NetBSD does not support LC_MONETARY at the moment
// XFAIL: netbsd
@@ -82,14 +79,6 @@ void assert_sign_symbol_none_value(std::money_base::pattern p)
assert(p.field[3] == std::money_base::value);
}
-void assert_value_none_symbol_sign(std::money_base::pattern p)
-{
- assert(p.field[0] == std::money_base::value);
- assert(p.field[1] == std::money_base::none);
- assert(p.field[2] == std::money_base::symbol);
- assert(p.field[3] == std::money_base::sign);
-}
-
void assert_sign_value_none_symbol(std::money_base::pattern p)
{
assert(p.field[0] == std::money_base::sign);
@@ -149,39 +138,23 @@ int main(int, char**)
{
Fnf f(LOCALE_fr_FR_UTF_8, 1);
std::money_base::pattern p = f.neg_format();
-#ifdef __APPLE__
- assert_value_none_symbol_sign(p);
-#else
assert_sign_value_none_symbol(p);
-#endif
}
{
Fnt f(LOCALE_fr_FR_UTF_8, 1);
std::money_base::pattern p = f.neg_format();
-#ifdef __APPLE__
- assert_value_none_symbol_sign(p);
-#else
assert_sign_value_none_symbol(p);
-#endif
}
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
{
Fwf f(LOCALE_fr_FR_UTF_8, 1);
std::money_base::pattern p = f.neg_format();
-#ifdef __APPLE__
- assert_value_none_symbol_sign(p);
-#else
assert_sign_value_none_symbol(p);
-#endif
}
{
Fwt f(LOCALE_fr_FR_UTF_8, 1);
std::money_base::pattern p = f.neg_format();
-#ifdef __APPLE__
- assert_value_none_symbol_sign(p);
-#else
assert_sign_value_none_symbol(p);
-#endif
}
#endif // TEST_HAS_NO_WIDE_CHARACTERS
@@ -211,7 +184,7 @@ int main(int, char**)
{
Fnf f(LOCALE_zh_CN_UTF_8, 1);
std::money_base::pattern p = f.neg_format();
-#ifdef _AIX
+#if defined(_AIX) || defined(__APPLE__)
assert_sign_symbol_none_value(p);
#else
assert_symbol_sign_none_value(p);
@@ -220,7 +193,7 @@ int main(int, char**)
{
Fnt f(LOCALE_zh_CN_UTF_8, 1);
std::money_base::pattern p = f.neg_format();
-#if defined(_WIN32) || defined(__APPLE__)
+#if defined(_WIN32)
assert_symbol_sign_none_value(p);
#else
assert_sign_symbol_none_value(p);
@@ -230,7 +203,7 @@ int main(int, char**)
{
Fwf f(LOCALE_zh_CN_UTF_8, 1);
std::money_base::pattern p = f.neg_format();
-#ifdef _AIX
+#if defined(_AIX) || defined(__APPLE__)
assert_sign_symbol_none_value(p);
#else
assert_symbol_sign_none_value(p);
@@ -239,7 +212,7 @@ int main(int, char**)
{
Fwt f(LOCALE_zh_CN_UTF_8, 1);
std::money_base::pattern p = f.neg_format();
-#if defined(_WIN32) || defined(__APPLE__)
+#if defined(_WIN32)
assert_symbol_sign_none_value(p);
#else
assert_sign_symbol_none_value(p);
diff --git a/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/pos_format.pass.cpp b/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/pos_format.pass.cpp
index 671620a..11832a7 100644
--- a/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/pos_format.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.monetary/locale.moneypunct.byname/pos_format.pass.cpp
@@ -5,7 +5,7 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
+
// NetBSD does not support LC_MONETARY at the moment
// XFAIL: netbsd
@@ -79,14 +79,6 @@ void assert_sign_symbol_none_value(std::money_base::pattern p)
assert(p.field[3] == std::money_base::value);
}
-void assert_value_none_symbol_sign(std::money_base::pattern p)
-{
- assert(p.field[0] == std::money_base::value);
- assert(p.field[1] == std::money_base::none);
- assert(p.field[2] == std::money_base::symbol);
- assert(p.field[3] == std::money_base::sign);
-}
-
void assert_sign_value_none_symbol(std::money_base::pattern p)
{
assert(p.field[0] == std::money_base::sign);
diff --git a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp
index 612d373..31682fea 100644
--- a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp
@@ -6,12 +6,10 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// The fix for LWG2381 (https://github.com/llvm/llvm-project/pull/77948) changed behavior of
// FP parsing. This requires 3e15c97fa3812993bdc319827a5c6d867b765ae8 in the dylib.
-// XFAIL: using-built-library-before-llvm-19
+// TODO: Remove && !darwin once availability markup for LLVM 19 on macOS has been added
+// XFAIL: using-built-library-before-llvm-19 && !darwin
// <locale>
diff --git a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp
index 58bc9e5..57eedc8 100644
--- a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp
@@ -6,12 +6,10 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// The fix for LWG2381 (https://github.com/llvm/llvm-project/pull/77948) changed behavior of
// FP parsing. This requires 3e15c97fa3812993bdc319827a5c6d867b765ae8 in the dylib.
-// XFAIL: using-built-library-before-llvm-19
+// TODO: Remove && !darwin once availability markup for LLVM 19 on macOS has been added
+// XFAIL: using-built-library-before-llvm-19 && !darwin
// <locale>
diff --git a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp
index bf8bb65..8324ee3 100644
--- a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp
@@ -6,12 +6,10 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// The fix for LWG2381 (https://github.com/llvm/llvm-project/pull/77948) changed behavior of
// FP parsing. This requires 3e15c97fa3812993bdc319827a5c6d867b765ae8 in the dylib.
-// XFAIL: using-built-library-before-llvm-19
+// TODO: Remove && !darwin once availability markup for LLVM 19 on macOS has been added
+// XFAIL: using-built-library-before-llvm-19 && !darwin
// <locale>
diff --git a/libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/grouping.pass.cpp b/libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/grouping.pass.cpp
index a87c5e0..11ec754 100644
--- a/libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/grouping.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/grouping.pass.cpp
@@ -5,10 +5,7 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
+//
// NetBSD does not support LC_NUMERIC at the moment
// XFAIL: netbsd
@@ -63,7 +60,7 @@ int main(int, char**)
}
{
std::locale l(LOCALE_fr_FR_UTF_8);
-#if defined(TEST_HAS_GLIBC) || defined(_WIN32) || defined(_AIX)
+#if defined(TEST_HAS_GLIBC) || defined(_WIN32) || defined(_AIX) || defined(__APPLE__)
const char* const group = "\3";
#else
const char* const group = "\x7f";
diff --git a/libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/thousands_sep.pass.cpp b/libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/thousands_sep.pass.cpp
index ef39e8a..53f2c85 100644
--- a/libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/thousands_sep.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/facet.numpunct/locale.numpunct.byname/thousands_sep.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// NetBSD does not support LC_NUMERIC at the moment
// XFAIL: netbsd
@@ -69,7 +66,7 @@ int main(int, char**)
// The below tests work around GLIBC's use of U202F as LC_NUMERIC thousands_sep.
std::locale l(LOCALE_fr_FR_UTF_8);
{
-#if defined(_CS_GNU_LIBC_VERSION) || defined(_WIN32) || defined(_AIX)
+#if defined(_CS_GNU_LIBC_VERSION) || defined(_WIN32) || defined(_AIX) || defined(__APPLE__)
const char sep = ' ';
#else
const char sep = ',';
diff --git a/libcxx/test/std/time/time.duration/time.duration.nonmember/ostream.pass.cpp b/libcxx/test/std/time/time.duration/time.duration.nonmember/ostream.pass.cpp
index 4e84db9..97ac0427 100644
--- a/libcxx/test/std/time/time.duration/time.duration.nonmember/ostream.pass.cpp
+++ b/libcxx/test/std/time/time.duration/time.duration.nonmember/ostream.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// UNSUPPORTED: c++03, c++11, c++14, c++17
// UNSUPPORTED: no-localization
// UNSUPPORTED: GCC-ALWAYS_INLINE-FIXME
@@ -83,17 +80,10 @@ static void test_values() {
assert(stream_c_locale<CharT>(1'000.123456s) == SV("1000.1235s"));
if constexpr (std::same_as<CharT, char>) {
-#if defined(__APPLE__)
- assert(stream_fr_FR_locale<CharT>(-1'000'000s) == SV("-1000000s"));
- assert(stream_fr_FR_locale<CharT>(1'000'000s) == SV("1000000s"));
- assert(stream_fr_FR_locale<CharT>(-1'000.123456s) == SV("-1000,1235s"));
- assert(stream_fr_FR_locale<CharT>(1'000.123456s) == SV("1000,1235s"));
-#else
assert(stream_fr_FR_locale<CharT>(-1'000'000s) == SV("-1 000 000s"));
assert(stream_fr_FR_locale<CharT>(1'000'000s) == SV("1 000 000s"));
assert(stream_fr_FR_locale<CharT>(-1'000.123456s) == SV("-1 000,1235s"));
assert(stream_fr_FR_locale<CharT>(1'000.123456s) == SV("1 000,1235s"));
-#endif
} else {
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
assert(stream_fr_FR_locale<CharT>(-1'000'000s) == L"-1" FR_THOU_SEP "000" FR_THOU_SEP "000s");
diff --git a/libcxx/test/std/time/time.syn/formatter.duration.pass.cpp b/libcxx/test/std/time/time.syn/formatter.duration.pass.cpp
index 973bce8..f1f7deb 100644
--- a/libcxx/test/std/time/time.syn/formatter.duration.pass.cpp
+++ b/libcxx/test/std/time/time.syn/formatter.duration.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// UNSUPPORTED: c++03, c++11, c++14, c++17
// UNSUPPORTED: no-localization
// UNSUPPORTED: GCC-ALWAYS_INLINE-FIXME
@@ -408,19 +405,11 @@ static void test_valid_positive_integral_values() {
"%OM='00'\t"
"%S='00'\t"
"%OS='00'\t"
-# if defined(__APPLE__)
- "%p='AM'\t"
-# else
"%p='午前'\t"
-# endif
"%R='00:00'\t"
"%T='00:00:00'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='12:00:00 AM'\t"
-# else
"%r='12:00:00 午前'\t"
-# endif
"%X='00時00分00秒'\t"
"%EX='00時00分00秒'\t"
# elif defined(_WIN32)
@@ -448,19 +437,11 @@ static void test_valid_positive_integral_values() {
"%OM='59'\t"
"%S='59'\t"
"%OS='59'\t"
-# if defined(__APPLE__)
- "%p='AM'\t"
-# else
"%p='午前'\t"
-# endif
"%R='11:59'\t"
"%T='11:59:59'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='11:59:59 AM'\t"
-# else
"%r='11:59:59 午前'\t"
-# endif
"%X='11時59分59秒'\t"
"%EX='11時59分59秒'\t"
# elif defined(_WIN32)
@@ -488,19 +469,11 @@ static void test_valid_positive_integral_values() {
"%OM='00'\t"
"%S='00'\t"
"%OS='00'\t"
-# if defined(__APPLE__)
- "%p='PM'\t"
-# else
"%p='午後'\t"
-# endif
"%R='12:00'\t"
"%T='12:00:00'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='12:00:00 PM'\t"
-# else
"%r='12:00:00 午後'\t"
-# endif
"%X='12時00分00秒'\t"
"%EX='12時00分00秒'\t"
# else
@@ -528,19 +501,11 @@ static void test_valid_positive_integral_values() {
"%OM='59'\t"
"%S='59'\t"
"%OS='59'\t"
-# if defined(__APPLE__)
- "%p='PM'\t"
-# else
"%p='午後'\t"
-# endif
"%R='23:59'\t"
"%T='23:59:59'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='11:59:59 PM'\t"
-# else
"%r='11:59:59 午後'\t"
-# endif
"%X='23時59分59秒'\t"
"%EX='23時59分59秒'\t"
# else
@@ -568,19 +533,11 @@ static void test_valid_positive_integral_values() {
"%OM='00'\t"
"%S='00'\t"
"%OS='00'\t"
-# if defined(__APPLE__)
- "%p='AM'\t"
-# else
"%p='午前'\t"
-# endif
"%R='00:00'\t"
"%T='00:00:00'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='12:00:00 AM'\t"
-# else
"%r='12:00:00 午前'\t"
-# endif
"%X='00時00分00秒'\t"
"%EX='00時00分00秒'\t"
# elif defined(_WIN32)
@@ -835,19 +792,11 @@ static void test_valid_negative_integral_values() {
"%OM='59'\t"
"%S='59'\t"
"%OS='59'\t"
-# if defined(__APPLE__)
- "%p='PM'\t"
-# else
"%p='午後'\t"
-# endif
"%R='23:59'\t"
"%T='23:59:59'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='11:59:59 PM'\t"
-# else
"%r='11:59:59 午後'\t"
-# endif
"%X='23時59分59秒'\t"
"%EX='23時59分59秒'\t"
# elif defined(_WIN32)
diff --git a/libcxx/test/std/time/time.syn/formatter.file_time.pass.cpp b/libcxx/test/std/time/time.syn/formatter.file_time.pass.cpp
index 28a972b..e258c41 100644
--- a/libcxx/test/std/time/time.syn/formatter.file_time.pass.cpp
+++ b/libcxx/test/std/time/time.syn/formatter.file_time.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// UNSUPPORTED: c++03, c++11, c++14, c++17
// UNSUPPORTED: no-localization
// UNSUPPORTED: GCC-ALWAYS_INLINE-FIXME
@@ -695,19 +692,11 @@ static void test_valid_values_time() {
"%OM='00'\t"
"%S='00'\t"
"%OS='00'\t"
-# if defined(__APPLE__)
- "%p='AM'\t"
-# else
"%p='午前'\t"
-# endif
"%R='00:00'\t"
"%T='00:00:00'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='12:00:00 AM'\t"
-# else
"%r='12:00:00 午前'\t"
-# endif
"%X='00時00分00秒'\t"
"%EX='00時00分00秒'\t"
# elif defined(_WIN32)
@@ -732,19 +721,11 @@ static void test_valid_values_time() {
"%OM='31'\t"
"%S='30.123'\t"
"%OS='30.123'\t"
-# if defined(__APPLE__)
- "%p='PM'\t"
-# else
"%p='午後'\t"
-# endif
"%R='23:31'\t"
"%T='23:31:30.123'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='11:31:30 PM'\t"
-# else
"%r='11:31:30 午後'\t"
-# endif
"%X='23時31分30秒'\t"
"%EX='23時31分30秒'\t"
# elif defined(_WIN32)
diff --git a/libcxx/test/std/time/time.syn/formatter.hh_mm_ss.pass.cpp b/libcxx/test/std/time/time.syn/formatter.hh_mm_ss.pass.cpp
index 82d9b4c..bbd9c07 100644
--- a/libcxx/test/std/time/time.syn/formatter.hh_mm_ss.pass.cpp
+++ b/libcxx/test/std/time/time.syn/formatter.hh_mm_ss.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// UNSUPPORTED: c++03, c++11, c++14, c++17
// UNSUPPORTED: no-localization
// UNSUPPORTED: GCC-ALWAYS_INLINE-FIXME
@@ -302,19 +299,11 @@ static void test_valid_values() {
"%OM='00'\t"
"%S='00'\t"
"%OS='00'\t"
-# if defined(__APPLE__)
- "%p='AM'\t"
-# else
"%p='午前'\t"
-# endif
"%R='00:00'\t"
"%T='00:00:00'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='12:00:00 AM'\t"
-# else
"%r='12:00:00 午前'\t"
-# endif
"%X='00時00分00秒'\t"
"%EX='00時00分00秒'\t"
# elif defined(_WIN32)
@@ -339,19 +328,11 @@ static void test_valid_values() {
"%OM='31'\t"
"%S='30.123'\t"
"%OS='30.123'\t"
-# if defined(__APPLE__)
- "%p='PM'\t"
-# else
"%p='午後'\t"
-# endif
"%R='23:31'\t"
"%T='23:31:30.123'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='11:31:30 PM'\t"
-# else
"%r='11:31:30 午後'\t"
-# endif
"%X='23時31分30秒'\t"
"%EX='23時31分30秒'\t"
# elif defined(_WIN32)
@@ -376,19 +357,11 @@ static void test_valid_values() {
"%OM='02'\t"
"%S='01.123456789012'\t"
"%OS='01.123456789012'\t"
-# if defined(__APPLE__)
- "%p='AM'\t"
-# else
"%p='午前'\t"
-# endif
"%R='03:02'\t"
"%T='03:02:01.123456789012'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='03:02:01 AM'\t"
-# else
"%r='03:02:01 午前'\t"
-# endif
"%X='03時02分01秒'\t"
"%EX='03時02分01秒'\t"
# elif defined(_WIN32)
@@ -413,19 +386,11 @@ static void test_valid_values() {
"%OM='01'\t"
"%S='01'\t"
"%OS='01'\t"
-# if defined(__APPLE__)
- "%p='AM'\t"
-# else
"%p='午前'\t"
-# endif
"%R='01:01'\t"
"%T='01:01:01'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='01:01:01 AM'\t"
-# else
"%r='01:01:01 午前'\t"
-# endif
"%X='01時01分01秒'\t"
"%EX='01時01分01秒'\t"
# elif defined(_WIN32)
diff --git a/libcxx/test/std/time/time.syn/formatter.local_time.pass.cpp b/libcxx/test/std/time/time.syn/formatter.local_time.pass.cpp
index bd23337..ce3af8e 100644
--- a/libcxx/test/std/time/time.syn/formatter.local_time.pass.cpp
+++ b/libcxx/test/std/time/time.syn/formatter.local_time.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// UNSUPPORTED: c++03, c++11, c++14, c++17
// UNSUPPORTED: no-localization
// UNSUPPORTED: GCC-ALWAYS_INLINE-FIXME
@@ -694,19 +691,11 @@ static void test_valid_values_time() {
"%OM='00'\t"
"%S='00'\t"
"%OS='00'\t"
-# if defined(__APPLE__)
- "%p='AM'\t"
-# else
"%p='午前'\t"
-# endif
"%R='00:00'\t"
"%T='00:00:00'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='12:00:00 AM'\t"
-# else
"%r='12:00:00 午前'\t"
-# endif
"%X='00時00分00秒'\t"
"%EX='00時00分00秒'\t"
# elif defined(_WIN32)
@@ -731,19 +720,11 @@ static void test_valid_values_time() {
"%OM='31'\t"
"%S='30.123'\t"
"%OS='30.123'\t"
-# if defined(__APPLE__)
- "%p='PM'\t"
-# else
"%p='午後'\t"
-# endif
"%R='23:31'\t"
"%T='23:31:30.123'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='11:31:30 PM'\t"
-# else
"%r='11:31:30 午後'\t"
-# endif
"%X='23時31分30秒'\t"
"%EX='23時31分30秒'\t"
# elif defined(_WIN32)
diff --git a/libcxx/test/std/time/time.syn/formatter.sys_time.pass.cpp b/libcxx/test/std/time/time.syn/formatter.sys_time.pass.cpp
index 9c9c8e0..9238f3d 100644
--- a/libcxx/test/std/time/time.syn/formatter.sys_time.pass.cpp
+++ b/libcxx/test/std/time/time.syn/formatter.sys_time.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// UNSUPPORTED: c++03, c++11, c++14, c++17
// UNSUPPORTED: no-localization
// UNSUPPORTED: GCC-ALWAYS_INLINE-FIXME
@@ -691,19 +688,11 @@ static void test_valid_values_time() {
"%OM='00'\t"
"%S='00'\t"
"%OS='00'\t"
-# if defined(__APPLE__)
- "%p='AM'\t"
-# else
"%p='午前'\t"
-# endif
"%R='00:00'\t"
"%T='00:00:00'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='12:00:00 AM'\t"
-# else
"%r='12:00:00 午前'\t"
-# endif
"%X='00時00分00秒'\t"
"%EX='00時00分00秒'\t"
# elif defined(_WIN32)
@@ -728,19 +717,11 @@ static void test_valid_values_time() {
"%OM='31'\t"
"%S='30.123'\t"
"%OS='30.123'\t"
-# if defined(__APPLE__)
- "%p='PM'\t"
-# else
"%p='午後'\t"
-# endif
"%R='23:31'\t"
"%T='23:31:30.123'\t"
# if defined(__APPLE__) || defined(__FreeBSD__)
-# if defined(__APPLE__)
- "%r='11:31:30 PM'\t"
-# else
"%r='11:31:30 午後'\t"
-# endif
"%X='23時31分30秒'\t"
"%EX='23時31分30秒'\t"
# elif defined(_WIN32)
diff --git a/libcxx/test/std/utilities/format/format.arguments/format.arg/visit.pass.cpp b/libcxx/test/std/utilities/format/format.arguments/format.arg/visit.pass.cpp
index 20e0a5e..68fe8b6 100644
--- a/libcxx/test/std/utilities/format/format.arguments/format.arg/visit.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.arguments/format.arg/visit.pass.cpp
@@ -8,8 +8,6 @@
// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
// UNSUPPORTED: GCC-ALWAYS_INLINE-FIXME
-// The tested functionality needs deducing this.
-// XFAIL: apple-clang
// <format>
diff --git a/libcxx/test/std/utilities/format/format.arguments/format.arg/visit.return_type.pass.cpp b/libcxx/test/std/utilities/format/format.arguments/format.arg/visit.return_type.pass.cpp
index 8a79dd4..4ae63e8 100644
--- a/libcxx/test/std/utilities/format/format.arguments/format.arg/visit.return_type.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.arguments/format.arg/visit.return_type.pass.cpp
@@ -8,8 +8,6 @@
// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
// UNSUPPORTED: GCC-ALWAYS_INLINE-FIXME
-// The tested functionality needs deducing this.
-// XFAIL: apple-clang
// <format>
diff --git a/libcxx/test/std/utilities/format/format.arguments/format.arg/visit_format_arg.deprecated.verify.cpp b/libcxx/test/std/utilities/format/format.arguments/format.arg/visit_format_arg.deprecated.verify.cpp
index 146ceba..77df72d 100644
--- a/libcxx/test/std/utilities/format/format.arguments/format.arg/visit_format_arg.deprecated.verify.cpp
+++ b/libcxx/test/std/utilities/format/format.arguments/format.arg/visit_format_arg.deprecated.verify.cpp
@@ -8,7 +8,6 @@
// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
// UNSUPPORTED: GCC-ALWAYS_INLINE-FIXME
-// XFAIL: apple-clang
// <format>
diff --git a/libcxx/test/std/utilities/format/format.arguments/format.arg/visit_format_arg.pass.cpp b/libcxx/test/std/utilities/format/format.arguments/format.arg/visit_format_arg.pass.cpp
index d99675a7..9b7c8a7 100644
--- a/libcxx/test/std/utilities/format/format.arguments/format.arg/visit_format_arg.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.arguments/format.arg/visit_format_arg.pass.cpp
@@ -9,6 +9,8 @@
// UNSUPPORTED: c++03, c++11, c++14, c++17
// UNSUPPORTED: GCC-ALWAYS_INLINE-FIXME
+// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
+
// <format>
// template<class Visitor, class Context>
@@ -25,10 +27,6 @@
#include "make_string.h"
#include "min_allocator.h"
-#if TEST_STD_VER >= 26 && defined(TEST_HAS_EXPLICIT_THIS_PARAMETER)
-TEST_CLANG_DIAGNOSTIC_IGNORED("-Wdeprecated-declarations")
-#endif
-
template <class Context, class To, class From>
void test(From value) {
auto store = std::make_format_args<Context>(value);
diff --git a/libcxx/test/std/utilities/format/format.arguments/format.args/get.pass.cpp b/libcxx/test/std/utilities/format/format.arguments/format.args/get.pass.cpp
index c7dd82d..cbddc4f 100644
--- a/libcxx/test/std/utilities/format/format.arguments/format.args/get.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.arguments/format.args/get.pass.cpp
@@ -32,7 +32,7 @@ void test(From value) {
else
assert(false);
};
-#if TEST_STD_VER >= 26 && defined(TEST_HAS_EXPLICIT_THIS_PARAMETER)
+#if TEST_STD_VER >= 26
format_args.get(0).visit(visitor);
#else
std::visit_format_arg(visitor, format_args.get(0));
@@ -47,7 +47,7 @@ void test_handle(T value) {
std::basic_format_args<Context> format_args{store};
auto visitor = [](auto a) { assert((std::is_same_v<decltype(a), typename std::basic_format_arg<Context>::handle>)); };
-#if TEST_STD_VER >= 26 && defined(TEST_HAS_EXPLICIT_THIS_PARAMETER)
+#if TEST_STD_VER >= 26
format_args.get(0).visit(visitor);
#else
std::visit_format_arg(visitor, format_args.get(0));
@@ -73,7 +73,7 @@ void test_string_view(From value) {
else
assert(false);
};
-#if TEST_STD_VER >= 26 && defined(TEST_HAS_EXPLICIT_THIS_PARAMETER)
+#if TEST_STD_VER >= 26
format_args.get(0).visit(visitor);
#else
std::visit_format_arg(visitor, format_args.get(0));
diff --git a/libcxx/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR20855_tuple_ref_binding_diagnostics.pass.cpp b/libcxx/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR20855_tuple_ref_binding_diagnostics.pass.cpp
index d78de0e..0f6a673 100644
--- a/libcxx/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR20855_tuple_ref_binding_diagnostics.pass.cpp
+++ b/libcxx/test/std/utilities/tuple/tuple.tuple/tuple.cnstr/PR20855_tuple_ref_binding_diagnostics.pass.cpp
@@ -16,17 +16,6 @@
#include <tuple>
#include <string>
#include <cassert>
-#include "test_macros.h"
-
-#if TEST_HAS_BUILTIN(__reference_constructs_from_temporary)
-# define ASSERT_REFERENCE_BINDS_TEMPORARY(...) static_assert(__reference_constructs_from_temporary(__VA_ARGS__), "")
-# define ASSERT_NOT_REFERENCE_BINDS_TEMPORARY(...) \
- static_assert(!__reference_constructs_from_temporary(__VA_ARGS__), "")
-#else
-// TODO(LLVM 22): Remove this as all support compilers should have __reference_constructs_from_temporary implemented.
-# define ASSERT_REFERENCE_BINDS_TEMPORARY(...) static_assert(__reference_binds_to_temporary(__VA_ARGS__), "")
-# define ASSERT_NOT_REFERENCE_BINDS_TEMPORARY(...) static_assert(!__reference_binds_to_temporary(__VA_ARGS__), "")
-#endif
template <class Tp>
struct ConvertsTo {
@@ -42,17 +31,6 @@ struct ConvertsTo {
struct Base {};
struct Derived : Base {};
-
-static_assert(std::is_same<decltype("abc"), decltype(("abc"))>::value, "");
-ASSERT_REFERENCE_BINDS_TEMPORARY(std::string const&, decltype("abc"));
-ASSERT_REFERENCE_BINDS_TEMPORARY(std::string const&, decltype(("abc")));
-ASSERT_REFERENCE_BINDS_TEMPORARY(std::string const&, const char*&&);
-
-ASSERT_NOT_REFERENCE_BINDS_TEMPORARY(int&, const ConvertsTo<int&>&);
-ASSERT_NOT_REFERENCE_BINDS_TEMPORARY(const int&, ConvertsTo<int&>&);
-ASSERT_NOT_REFERENCE_BINDS_TEMPORARY(Base&, Derived&);
-
-
static_assert(std::is_constructible<int&, std::reference_wrapper<int>>::value, "");
static_assert(std::is_constructible<int const&, std::reference_wrapper<int>>::value, "");
diff --git a/libcxx/test/std/utilities/variant/variant.visit.member/robust_against_adl.pass.cpp b/libcxx/test/std/utilities/variant/variant.visit.member/robust_against_adl.pass.cpp
index 7be7c7f..38cf34a 100644
--- a/libcxx/test/std/utilities/variant/variant.visit.member/robust_against_adl.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.visit.member/robust_against_adl.pass.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
-// XFAIL: apple-clang
// <variant>
diff --git a/libcxx/test/std/utilities/variant/variant.visit.member/visit.pass.cpp b/libcxx/test/std/utilities/variant/variant.visit.member/visit.pass.cpp
index f68112d..aeb1297 100644
--- a/libcxx/test/std/utilities/variant/variant.visit.member/visit.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.visit.member/visit.pass.cpp
@@ -7,8 +7,6 @@
//===----------------------------------------------------------------------===//
// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
-// The tested functionality needs deducing this.
-// XFAIL: apple-clang
// <variant>
diff --git a/libcxx/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp b/libcxx/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp
index 90320ae..7ca0590 100644
--- a/libcxx/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp
+++ b/libcxx/test/std/utilities/variant/variant.visit.member/visit_return_type.pass.cpp
@@ -7,8 +7,6 @@
//===----------------------------------------------------------------------===//
// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
-// The tested functionality needs deducing this.
-// XFAIL: apple-clang
// <variant>
diff --git a/libcxx/test/support/locale_helpers.h b/libcxx/test/support/locale_helpers.h
index 946c2fe..3cec739 100644
--- a/libcxx/test/support/locale_helpers.h
+++ b/libcxx/test/support/locale_helpers.h
@@ -73,6 +73,12 @@ MultiStringType currency_symbol_ru_RU() {
return MKSTR("\u20BD"); // U+20BD RUBLE SIGN
#elif defined(_WIN32) || defined(__FreeBSD__) || defined(_AIX)
return MKSTR("\u20BD"); // U+20BD RUBLE SIGN
+#elif defined(__APPLE__)
+ if (__builtin_available(macOS 15.4, *)) {
+ return MKSTR("\u20BD"); // U+20BD RUBLE SIGN
+ } else {
+ return MKSTR("\u0440\u0443\u0431.");
+ }
#else
return MKSTR("\u0440\u0443\u0431.");
#endif
@@ -81,6 +87,12 @@ MultiStringType currency_symbol_ru_RU() {
MultiStringType currency_symbol_zh_CN() {
#if defined(_WIN32)
return MKSTR("\u00A5"); // U+00A5 YEN SIGN
+#elif defined(__APPLE__)
+ if (__builtin_available(macOS 15.4, *)) {
+ return MKSTR("\u00A5"); // U+00A5 YEN SIGN
+ } else {
+ return MKSTR("\uFFE5"); // U+FFE5 FULLWIDTH YEN SIGN
+ }
#else
return MKSTR("\uFFE5"); // U+FFE5 FULLWIDTH YEN SIGN
#endif
diff --git a/libcxx/test/support/test_basic_format_arg.h b/libcxx/test/support/test_basic_format_arg.h
index f51f6e9..99cd558 100644
--- a/libcxx/test/support/test_basic_format_arg.h
+++ b/libcxx/test/support/test_basic_format_arg.h
@@ -21,7 +21,7 @@ bool test_basic_format_arg(std::basic_format_arg<Context> arg, T expected) {
else
return false;
};
-#if TEST_STD_VER >= 26 && defined(TEST_HAS_EXPLICIT_THIS_PARAMETER)
+#if TEST_STD_VER >= 26
return arg.visit(std::move(visitor));
#else
return std::visit_format_arg(std::move(visitor), arg);
diff --git a/libcxx/test/support/test_macros.h b/libcxx/test/support/test_macros.h
index c4e1600..8d88d6f 100644
--- a/libcxx/test/support/test_macros.h
+++ b/libcxx/test/support/test_macros.h
@@ -531,13 +531,6 @@ inline Tp const& DoNotOptimize(Tp const& value) {
# define TEST_IF_AIX(arg_true, arg_false) arg_false
#endif
-// Clang-18 has support for deducing this, but it does not set the FTM.
-#ifdef _LIBCPP_USE_FROZEN_CXX03_HEADERS
-// This is a C++20 featue, so we don't care whether the compiler could support it
-#elif defined(_LIBCPP_VERSION) && _LIBCPP_HAS_EXPLICIT_THIS_PARAMETER
-# define TEST_HAS_EXPLICIT_THIS_PARAMETER
-#endif
-
// Placement `operator new`/`operator new[]` are not yet constexpr in C++26
// when using MS ABI, because they are from <vcruntime_new.h>.
#if defined(__cpp_lib_constexpr_new) && __cpp_lib_constexpr_new >= 202406L
diff --git a/libcxxabi/test/uncaught_exception.pass.cpp b/libcxxabi/test/uncaught_exception.pass.cpp
index 8e8468c..e977320 100644
--- a/libcxxabi/test/uncaught_exception.pass.cpp
+++ b/libcxxabi/test/uncaught_exception.pass.cpp
@@ -6,9 +6,6 @@
//
//===----------------------------------------------------------------------===//
-// TODO(mordante) Investigate
-// UNSUPPORTED: apple-clang
-
// UNSUPPORTED: no-exceptions
// This tests that libc++abi still provides __cxa_uncaught_exception() for
@@ -18,7 +15,8 @@
// to undefined symbols when linking against a libc++ that re-exports the symbols,
// but running against a libc++ that doesn't. Fortunately, usage of __cxa_uncaught_exception()
// in the wild seems to be close to non-existent.
-// XFAIL: using-built-library-before-llvm-19
+// TODO: Remove && !darwin once availability markup for LLVM 19 on macOS has been added
+// XFAIL: using-built-library-before-llvm-19 && !darwin
#include <cxxabi.h>
#include <cassert>
diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp
index bbf4b29..a4150eb 100644
--- a/lld/ELF/SyntheticSections.cpp
+++ b/lld/ELF/SyntheticSections.cpp
@@ -2749,14 +2749,13 @@ RelroPaddingSection::RelroPaddingSection(Ctx &ctx)
: SyntheticSection(ctx, ".relro_padding", SHT_NOBITS, SHF_ALLOC | SHF_WRITE,
1) {}
-RandomizePaddingSection::RandomizePaddingSection(Ctx &ctx, uint64_t size,
- OutputSection *parent)
- : SyntheticSection(ctx, ".randomize_padding", SHT_PROGBITS, SHF_ALLOC, 1),
+PaddingSection::PaddingSection(Ctx &ctx, uint64_t size, OutputSection *parent)
+ : SyntheticSection(ctx, ".padding", SHT_PROGBITS, SHF_ALLOC, 1),
size(size) {
this->parent = parent;
}
-void RandomizePaddingSection::writeTo(uint8_t *buf) {
+void PaddingSection::writeTo(uint8_t *buf) {
std::array<uint8_t, 4> filler = getParent()->getFiller(ctx);
uint8_t *end = buf + size;
for (; buf + 4 <= end; buf += 4)
diff --git a/lld/ELF/SyntheticSections.h b/lld/ELF/SyntheticSections.h
index ac3ec63..38e6811 100644
--- a/lld/ELF/SyntheticSections.h
+++ b/lld/ELF/SyntheticSections.h
@@ -779,11 +779,11 @@ public:
void writeTo(uint8_t *buf) override {}
};
-class RandomizePaddingSection final : public SyntheticSection {
+class PaddingSection final : public SyntheticSection {
uint64_t size;
public:
- RandomizePaddingSection(Ctx &ctx, uint64_t size, OutputSection *parent);
+ PaddingSection(Ctx &ctx, uint64_t size, OutputSection *parent);
size_t getSize() const override { return size; }
void writeTo(uint8_t *buf) override;
};
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index 4fa8039..083b4fb 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -1495,15 +1495,14 @@ static void randomizeSectionPadding(Ctx &ctx) {
if (auto *isd = dyn_cast<InputSectionDescription>(bc)) {
SmallVector<InputSection *, 0> tmp;
if (os->ptLoad != curPtLoad) {
- tmp.push_back(make<RandomizePaddingSection>(
- ctx, g() % ctx.arg.maxPageSize, os));
+ tmp.push_back(
+ make<PaddingSection>(ctx, g() % ctx.arg.maxPageSize, os));
curPtLoad = os->ptLoad;
}
for (InputSection *isec : isd->sections) {
// Probability of inserting padding is 1 in 16.
if (g() % 16 == 0)
- tmp.push_back(
- make<RandomizePaddingSection>(ctx, isec->addralign, os));
+ tmp.push_back(make<PaddingSection>(ctx, isec->addralign, os));
tmp.push_back(isec);
}
isd->sections = std::move(tmp);
diff --git a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py
index fd07324..405e91f 100644
--- a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py
+++ b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/lldbdap_testcase.py
@@ -15,6 +15,8 @@ import base64
# DAP tests as a whole have been flakey on the Windows on Arm bot. See:
# https://github.com/llvm/llvm-project/issues/137660
@skipIf(oslist=["windows"], archs=["aarch64"])
+# The Arm Linux bot needs stable resources before it can run these tests reliably.
+@skipIf(oslist=["linux"], archs=["arm$"])
class DAPTestCaseBase(TestBase):
# set timeout based on whether ASAN was enabled or not. Increase
# timeout by a factor of 10 if ASAN is enabled.
diff --git a/lldb/source/Host/windows/ProcessLauncherWindows.cpp b/lldb/source/Host/windows/ProcessLauncherWindows.cpp
index e1b4b7e..f5adada 100644
--- a/lldb/source/Host/windows/ProcessLauncherWindows.cpp
+++ b/lldb/source/Host/windows/ProcessLauncherWindows.cpp
@@ -16,7 +16,6 @@
#include "llvm/Support/Program.h"
#include <string>
-#include <unordered_set>
#include <vector>
using namespace lldb;
@@ -92,13 +91,13 @@ ProcessLauncherWindows::LaunchProcess(const ProcessLaunchInfo &launch_info,
startupinfo.hStdOutput =
stdout_handle ? stdout_handle : ::GetStdHandle(STD_OUTPUT_HANDLE);
- std::unordered_set<HANDLE> inherited_handles;
+ std::vector<HANDLE> inherited_handles;
if (startupinfo.hStdError)
- inherited_handles.insert(startupinfo.hStdError);
+ inherited_handles.push_back(startupinfo.hStdError);
if (startupinfo.hStdInput)
- inherited_handles.insert(startupinfo.hStdInput);
+ inherited_handles.push_back(startupinfo.hStdInput);
if (startupinfo.hStdOutput)
- inherited_handles.insert(startupinfo.hStdOutput);
+ inherited_handles.push_back(startupinfo.hStdOutput);
SIZE_T attributelist_size = 0;
InitializeProcThreadAttributeList(/*lpAttributeList=*/nullptr,
@@ -121,15 +120,13 @@ ProcessLauncherWindows::LaunchProcess(const ProcessLaunchInfo &launch_info,
const FileAction *act = launch_info.GetFileActionAtIndex(i);
if (act->GetAction() == FileAction::eFileActionDuplicate &&
act->GetFD() == act->GetActionArgument())
- inherited_handles.insert(reinterpret_cast<HANDLE>(act->GetFD()));
+ inherited_handles.push_back(reinterpret_cast<HANDLE>(act->GetFD()));
}
if (!inherited_handles.empty()) {
- std::vector<HANDLE> handles(inherited_handles.begin(),
- inherited_handles.end());
if (!UpdateProcThreadAttribute(
startupinfoex.lpAttributeList, /*dwFlags=*/0,
- PROC_THREAD_ATTRIBUTE_HANDLE_LIST, handles.data(),
- handles.size() * sizeof(HANDLE),
+ PROC_THREAD_ATTRIBUTE_HANDLE_LIST, inherited_handles.data(),
+ inherited_handles.size() * sizeof(HANDLE),
/*lpPreviousValue=*/nullptr, /*lpReturnSize=*/nullptr)) {
error = Status(::GetLastError(), eErrorTypeWin32);
return HostProcess();
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
index 36bc176..c049829 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
@@ -450,6 +450,10 @@ ParsedDWARFTypeAttributes::ParsedDWARFTypeAttributes(const DWARFDIE &die) {
byte_size = form_value.Unsigned();
break;
+ case DW_AT_bit_size:
+ data_bit_size = form_value.Unsigned();
+ break;
+
case DW_AT_alignment:
alignment = form_value.Unsigned();
break;
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h
index da58f4c..f5f7071 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h
@@ -574,6 +574,7 @@ struct ParsedDWARFTypeAttributes {
lldb_private::plugin::dwarf::DWARFFormValue type;
lldb::LanguageType class_language = lldb::eLanguageTypeUnknown;
std::optional<uint64_t> byte_size;
+ std::optional<uint64_t> data_bit_size;
std::optional<uint64_t> alignment;
size_t calling_convention = llvm::dwarf::DW_CC_normal;
uint32_t bit_stride = 0;
diff --git a/lldb/test/API/commands/register/register/aarch64_dynamic_regset/TestArm64DynamicRegsets.py b/lldb/test/API/commands/register/register/aarch64_dynamic_regset/TestArm64DynamicRegsets.py
index eb121ec..a985ebb 100644
--- a/lldb/test/API/commands/register/register/aarch64_dynamic_regset/TestArm64DynamicRegsets.py
+++ b/lldb/test/API/commands/register/register/aarch64_dynamic_regset/TestArm64DynamicRegsets.py
@@ -97,6 +97,9 @@ class RegisterCommandsTestCase(TestBase):
@skipIf(oslist=no_match(["linux"]))
def test_aarch64_dynamic_regset_config(self):
"""Test AArch64 Dynamic Register sets configuration."""
+ if not self.isAArch64SVE():
+ self.skipTest("SVE must be present")
+
register_sets = self.setup_register_config_test()
for registerSet in register_sets:
@@ -259,6 +262,8 @@ class RegisterCommandsTestCase(TestBase):
def test_aarch64_dynamic_regset_config_sme_write_za_to_enable(self):
"""Test that ZA and ZT0 (if present) shows as 0s when disabled and
can be enabled by writing to ZA."""
+ if not self.isAArch64SVE():
+ self.skipTest("SVE must be present.")
if not self.isAArch64SME():
self.skipTest("SME must be present.")
@@ -270,6 +275,8 @@ class RegisterCommandsTestCase(TestBase):
def test_aarch64_dynamic_regset_config_sme_write_zt0_to_enable(self):
"""Test that ZA and ZT0 (if present) shows as 0s when disabled and
can be enabled by writing to ZT0."""
+ if not self.isAArch64SVE():
+ self.skipTest("SVE must be present.")
if not self.isAArch64SME():
self.skipTest("SME must be present.")
if not self.isAArch64SME2():
diff --git a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/optional/TestDataFormatterLibcxxOptionalSimulator.py b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/optional/TestDataFormatterLibcxxOptionalSimulator.py
index 3fefe87..7463f88 100644
--- a/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/optional/TestDataFormatterLibcxxOptionalSimulator.py
+++ b/lldb/test/API/functionalities/data-formatter/data-formatter-stl/libcxx-simulators/optional/TestDataFormatterLibcxxOptionalSimulator.py
@@ -53,6 +53,8 @@ for r in range(2):
# causing this test to fail. This was reverted in newer version of clang
# with commit 52a9ba7ca.
@skipIf(compiler="clang", compiler_version=["=", "17"])
+ @skipIf(compiler="clang", compiler_version=["=", "18"])
+ @skipIf(compiler="clang", compiler_version=["=", "19"])
@functools.wraps(LibcxxOptionalDataFormatterSimulatorTestCase._run_test)
def test_method(self, defines=defines):
LibcxxOptionalDataFormatterSimulatorTestCase._run_test(self, defines)
diff --git a/lldb/test/API/lang/cpp/libcxx-internals-recognizer/TestLibcxxInternalsRecognizer.py b/lldb/test/API/lang/cpp/libcxx-internals-recognizer/TestLibcxxInternalsRecognizer.py
index d8a729b..2f942da 100644
--- a/lldb/test/API/lang/cpp/libcxx-internals-recognizer/TestLibcxxInternalsRecognizer.py
+++ b/lldb/test/API/lang/cpp/libcxx-internals-recognizer/TestLibcxxInternalsRecognizer.py
@@ -9,7 +9,7 @@ class LibCxxInternalsRecognizerTestCase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
@add_test_categories(["libc++"])
- @skipIf(compiler="clang", compiler_version=["<", "19.0"])
+ @skipIf(compiler="clang", compiler_version=["<=", "19.0"])
def test_frame_recognizer(self):
"""Test that implementation details of libc++ are hidden"""
self.build()
diff --git a/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py b/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py
index dc6bf38..09b1322 100644
--- a/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py
+++ b/lldb/test/API/tools/lldb-dap/launch/TestDAP_launch.py
@@ -642,6 +642,7 @@ class TestDAP_launch(lldbdap_testcase.DAPTestCaseBase):
@skipIfAsan
@skipIfWindows
@skipIf(oslist=["linux"], archs=no_match(["x86_64"]))
+ @skipIfBuildType(["debug"])
def test_stdio_redirection_and_console(self):
"""
Test stdio redirection and console.
diff --git a/lldb/unittests/SymbolFile/DWARF/DWARFASTParserClangTests.cpp b/lldb/unittests/SymbolFile/DWARF/DWARFASTParserClangTests.cpp
index 1abce69..064ed6d 100644
--- a/lldb/unittests/SymbolFile/DWARF/DWARFASTParserClangTests.cpp
+++ b/lldb/unittests/SymbolFile/DWARF/DWARFASTParserClangTests.cpp
@@ -1651,3 +1651,93 @@ DWARF:
EXPECT_EQ(param_die, ast_parser.GetObjectParameter(sub2, context_die));
}
}
+
+TEST_F(DWARFASTParserClangTests, TestTypeBitSize) {
+ // Tests that we correctly parse DW_AT_bit_size of a DW_AT_base_type.
+
+ const char *yamldata = R"(
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+ Machine: EM_AARCH64
+DWARF:
+ debug_str:
+ - _BitInt(2)
+ debug_abbrev:
+ - ID: 0
+ Table:
+ - Code: 0x1
+ Tag: DW_TAG_compile_unit
+ Children: DW_CHILDREN_yes
+ Attributes:
+ - Attribute: DW_AT_language
+ Form: DW_FORM_data2
+ - Code: 0x2
+ Tag: DW_TAG_base_type
+ Children: DW_CHILDREN_no
+ Attributes:
+ - Attribute: DW_AT_name
+ Form: DW_FORM_strp
+ - Attribute: DW_AT_encoding
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_byte_size
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_bit_size
+ Form: DW_FORM_data1
+
+ debug_info:
+ - Version: 5
+ UnitType: DW_UT_compile
+ AddrSize: 8
+ Entries:
+
+# DW_TAG_compile_unit
+# DW_AT_language [DW_FORM_data2] (DW_LANG_C_plus_plus)
+
+ - AbbrCode: 0x1
+ Values:
+ - Value: 0x04
+
+# DW_TAG_base_type
+# DW_AT_name [DW_FORM_strp] ('_BitInt(2)')
+
+ - AbbrCode: 0x2
+ Values:
+ - Value: 0x0
+ - Value: 0x05
+ - Value: 0x01
+ - Value: 0x02
+...
+)";
+
+ YAMLModuleTester t(yamldata);
+
+ DWARFUnit *unit = t.GetDwarfUnit();
+ ASSERT_NE(unit, nullptr);
+ const DWARFDebugInfoEntry *cu_entry = unit->DIE().GetDIE();
+ ASSERT_EQ(cu_entry->Tag(), DW_TAG_compile_unit);
+ ASSERT_EQ(unit->GetDWARFLanguageType(), DW_LANG_C_plus_plus);
+ DWARFDIE cu_die(unit, cu_entry);
+
+ auto holder = std::make_unique<clang_utils::TypeSystemClangHolder>("ast");
+ auto &ast_ctx = *holder->GetAST();
+ DWARFASTParserClangStub ast_parser(ast_ctx);
+
+ auto type_die = cu_die.GetFirstChild();
+ ASSERT_TRUE(type_die.IsValid());
+ ASSERT_EQ(type_die.Tag(), DW_TAG_base_type);
+
+ ParsedDWARFTypeAttributes attrs(type_die);
+ EXPECT_EQ(attrs.byte_size.value_or(0), 1U);
+ EXPECT_EQ(attrs.data_bit_size.value_or(0), 2U);
+
+ SymbolContext sc;
+ auto type_sp =
+ ast_parser.ParseTypeFromDWARF(sc, type_die, /*type_is_new_ptr=*/nullptr);
+ ASSERT_NE(type_sp, nullptr);
+
+ EXPECT_EQ(llvm::expectedToOptional(type_sp->GetByteSize(nullptr)).value_or(0),
+ 1U);
+}
diff --git a/llvm/docs/CommandGuide/llvm-cxxfilt.rst b/llvm/docs/CommandGuide/llvm-cxxfilt.rst
index 8c61ced..8e509ce 100644
--- a/llvm/docs/CommandGuide/llvm-cxxfilt.rst
+++ b/llvm/docs/CommandGuide/llvm-cxxfilt.rst
@@ -54,8 +54,7 @@ OPTIONS
.. option:: --no-strip-underscore, -n
- Do not strip a leading underscore. This is the default for all platforms
- except Mach-O based hosts.
+ Do not strip a leading underscore. This is the default for all platforms.
.. option:: --quote
@@ -64,7 +63,7 @@ OPTIONS
.. option:: --strip-underscore, -_
Strip a single leading underscore, if present, from each input name before
- demangling. On by default on Mach-O based platforms.
+ demangling.
.. option:: --types, -t
diff --git a/llvm/docs/DeveloperPolicy.rst b/llvm/docs/DeveloperPolicy.rst
index 45f2df2..9135406 100644
--- a/llvm/docs/DeveloperPolicy.rst
+++ b/llvm/docs/DeveloperPolicy.rst
@@ -1189,6 +1189,55 @@ Suggested disclaimer for the project README and the main project web page:
necessarily a reflection of the completeness or stability of the code, it
does indicate that the project is not yet endorsed as a component of LLVM.
+Adding or enabling a new LLVM pass
+----------------------------------
+
+The guidelines here are primarily targeted at the enablement of new major
+passes in the target-independent optimization pipeline. Small additions, or
+backend-specific passes, require a lesser degree of care. Before creating a new
+pass, consider whether the functionality can be integrated into an existing
+pass first. This is often both faster and more powerful.
+
+When adding a new pass, the goal should be to enable it as part of the default
+optimization pipeline as early as possible and then continue development
+incrementally. (This does not apply to passes that are only relevant for
+specific uses of LLVM, such as GC support passes.)
+
+The recommended workflow is:
+
+1. Implement a basic version of the pass and add it to the pass pipeline behind
+ a flag that is disabled by default. The initial version should focus on
+ handling simple cases correctly and efficiently.
+2. Enable the pass by default. Separating this step allows easily disabling the
+ pass if issues are encountered, without having to revert the entire
+ implementation.
+3. Incrementally extend the pass with new functionality. As the pass is already
+ enabled, it becomes easier to identify the specific change that has caused a
+ regression in correctness, optimization quality or compile-time.
+
+When enabling a pass, certain requirements must be met (in no particular order):
+
+ * **Maintenance:** The pass (and any analyses it depends on) must have at
+ least one maintainer.
+ * **Usefulness:** There should be evidence that the pass improves performance
+ (or whatever metric it optimizes for) on real-world workloads. Improvements
+ seen only on synthetic benchmarks may be insufficient.
+ * **Compile-Time:** The pass should not have a large impact on compile-time,
+ where the evaluation of what "large" means is up to reviewer discretion, and
+ may differ based on the value the pass provides. In any case, it is expected
+ that a concerted effort has been made to mitigate the compile-time impact,
+ both for the average case, and for pathological cases.
+ * **Correctness:** The pass should have no known correctness issues (except
+ global correctness issues that affect all of LLVM). If an old pass is being
+ enabled (rather than implementing a new one incrementally), additional due
+ diligence is required. The pass should be fully reviewed to ensure that it
+ still complies with current quality standards. Fuzzing with disabled
+ profitability checks may help gain additional confidence in the
+ implementation.
+
+If non-trivial issues are found in a newly enabled pass, it may be temporarily
+disabled again, until the issues have been resolved.
+
.. _copyright-license-patents:
Copyright, License, and Patents
diff --git a/llvm/docs/GettingInvolved.rst b/llvm/docs/GettingInvolved.rst
index 4b4b09a..039d616 100644
--- a/llvm/docs/GettingInvolved.rst
+++ b/llvm/docs/GettingInvolved.rst
@@ -223,6 +223,10 @@ what to add to your calendar invite.
- `ics <https://calendar.google.com/calendar/ical/c_673c6cd64474c0aff173bf8fa609559f93d654e0984d9d91d71abd32d28c0486%40group.calendar.google.com/public/basic.ics>`__
`gcal <https://calendar.google.com/calendar/embed?src=c_673c6cd64474c0aff173bf8fa609559f93d654e0984d9d91d71abd32d28c0486%40group.calendar.google.com&ctz=America%2FLos_Angeles>`__
-
+ * - GlobalISel
+ - Every 2nd Tuesday of the month
+ - `gcal <https://calendar.google.com/calendar/u/0?cid=ZDcyMjc0ZjZiZjNhMzFlYmE3NTNkMWM2MGM2NjM5ZWU3ZDE2MjM4MGFlZDc2ZjViY2UyYzMwNzVhZjk4MzQ4ZEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t>`__
+ - `Meeting details/agenda <https://docs.google.com/document/d/1Ry8O4-Tm5BFj9AMjr8qTQFU80z-ptiNQ62687NaIvLs/edit?usp=sharing>`__
For event owners, our Discord bot also supports sending automated announcements
@@ -254,10 +258,6 @@ the future.
- `ics <https://calendar.google.com/calendar/ical/c_1mincouiltpa24ac14of14lhi4%40group.calendar.google.com/public/basic.ics>`__
`gcal <https://calendar.google.com/calendar/embed?src=c_1mincouiltpa24ac14of14lhi4%40group.calendar.google.com>`__
- `Minutes/docs <https://docs.google.com/document/d/1-uEEZfmRdPThZlctOq9eXlmUaSSAAi8oKxhrPY_lpjk/edit#>`__
- * - GlobalISel
- - Every 2nd Tuesday of the month
- - `gcal <https://calendar.google.com/calendar/u/0?cid=ZDcyMjc0ZjZiZjNhMzFlYmE3NTNkMWM2MGM2NjM5ZWU3ZDE2MjM4MGFlZDc2ZjViY2UyYzMwNzVhZjk4MzQ4ZEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t>`__
- - `Meeting details/agenda <https://docs.google.com/document/d/1Ry8O4-Tm5BFj9AMjr8qTQFU80z-ptiNQ62687NaIvLs/edit?usp=sharing>`__
* - Vector Predication
- Every 2 weeks on Tuesdays, 3pm UTC
-
diff --git a/llvm/include/llvm/ADT/GenericCycleImpl.h b/llvm/include/llvm/ADT/GenericCycleImpl.h
index 4039078..00f85ca 100644
--- a/llvm/include/llvm/ADT/GenericCycleImpl.h
+++ b/llvm/include/llvm/ADT/GenericCycleImpl.h
@@ -561,6 +561,17 @@ auto GenericCycleInfo<ContextT>::getSmallestCommonCycle(CycleT *A,
return A;
}
+/// \brief Find the innermost cycle containing both given blocks.
+///
+/// \returns the innermost cycle containing both \p A and \p B
+/// or nullptr if there is no such cycle.
+template <typename ContextT>
+auto GenericCycleInfo<ContextT>::getSmallestCommonCycle(BlockT *A,
+ BlockT *B) const
+ -> CycleT * {
+ return getSmallestCommonCycle(getCycle(A), getCycle(B));
+}
+
/// \brief get the depth for the cycle which containing a given block.
///
/// \returns the depth for the innermost cycle containing \p Block or 0 if it is
diff --git a/llvm/include/llvm/ADT/GenericCycleInfo.h b/llvm/include/llvm/ADT/GenericCycleInfo.h
index b8b6e3e..c31bab3 100644
--- a/llvm/include/llvm/ADT/GenericCycleInfo.h
+++ b/llvm/include/llvm/ADT/GenericCycleInfo.h
@@ -298,6 +298,7 @@ public:
CycleT *getCycle(const BlockT *Block) const;
CycleT *getSmallestCommonCycle(CycleT *A, CycleT *B) const;
+ CycleT *getSmallestCommonCycle(BlockT *A, BlockT *B) const;
unsigned getCycleDepth(const BlockT *Block) const;
CycleT *getTopLevelParentCycle(BlockT *Block);
diff --git a/llvm/include/llvm/ADT/TypeSwitch.h b/llvm/include/llvm/ADT/TypeSwitch.h
index 5657303..50ca1d5 100644
--- a/llvm/include/llvm/ADT/TypeSwitch.h
+++ b/llvm/include/llvm/ADT/TypeSwitch.h
@@ -111,6 +111,7 @@ public:
return std::move(*result);
return defaultFn(this->value);
}
+
/// As a default, return the given value.
[[nodiscard]] ResultT Default(ResultT defaultResult) {
if (result)
@@ -118,6 +119,22 @@ public:
return defaultResult;
}
+ /// Default for pointer-like results types that accept `nullptr`.
+ template <typename ArgT = ResultT,
+ typename =
+ std::enable_if_t<std::is_constructible_v<ArgT, std::nullptr_t>>>
+ [[nodiscard]] ResultT Default(std::nullptr_t) {
+ return Default(ResultT(nullptr));
+ }
+
+ /// Default for optional results types that accept `std::nullopt`.
+ template <typename ArgT = ResultT,
+ typename =
+ std::enable_if_t<std::is_constructible_v<ArgT, std::nullopt_t>>>
+ [[nodiscard]] ResultT Default(std::nullopt_t) {
+ return Default(ResultT(std::nullopt));
+ }
+
/// Declare default as unreachable, making sure that all cases were handled.
[[nodiscard]] ResultT DefaultUnreachable(
const char *message = "Fell off the end of a type-switch") {
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h b/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h
index c69b6f7..8620726 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// Implements ExecutorProcessControl::MemoryAccess by making calls to
+// Implements the MemoryAccess interface by making calls to
// ExecutorProcessControl::callWrapperAsync.
//
// This simplifies the implementaton of new ExecutorProcessControl instances,
@@ -19,6 +19,7 @@
#define LLVM_EXECUTIONENGINE_ORC_EPCGENERICMEMORYACCESS_H
#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/MemoryAccess.h"
namespace llvm {
namespace orc {
diff --git a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
index 87b9520..d7f0e3a3d4 100644
--- a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
+++ b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
@@ -1167,6 +1167,14 @@ struct ThreadsT {
using EmptyTrait = std::true_type;
};
+// V6.0: [14.8] `threadset` clause
+template <typename T, typename I, typename E> //
+struct ThreadsetT {
+ ENUM(ThreadsetPolicy, Omp_Pool, Omp_Team);
+ using WrapperTrait = std::true_type;
+ ThreadsetPolicy v;
+};
+
// V5.2: [5.9.1] `to` clause
template <typename T, typename I, typename E> //
struct ToT {
@@ -1352,9 +1360,9 @@ using WrapperClausesT = std::variant<
ProcBindT<T, I, E>, ReverseOffloadT<T, I, E>, SafelenT<T, I, E>,
SelfMapsT<T, I, E>, SeverityT<T, I, E>, SharedT<T, I, E>, SimdlenT<T, I, E>,
SizesT<T, I, E>, PermutationT<T, I, E>, ThreadLimitT<T, I, E>,
- UnifiedAddressT<T, I, E>, UnifiedSharedMemoryT<T, I, E>, UniformT<T, I, E>,
- UpdateT<T, I, E>, UseDeviceAddrT<T, I, E>, UseDevicePtrT<T, I, E>,
- UsesAllocatorsT<T, I, E>>;
+ ThreadsetT<T, I, E>, UnifiedAddressT<T, I, E>,
+ UnifiedSharedMemoryT<T, I, E>, UniformT<T, I, E>, UpdateT<T, I, E>,
+ UseDeviceAddrT<T, I, E>, UseDevicePtrT<T, I, E>, UsesAllocatorsT<T, I, E>>;
template <typename T, typename I, typename E>
using UnionOfAllClausesT = typename type::Union< //
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td
index 61a1a05..208609f 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMP.td
+++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td
@@ -539,6 +539,10 @@ def OMPC_GroupPrivate : Clause<[Spelling<"groupprivate">]> {
def OMPC_Threads : Clause<[Spelling<"threads">]> {
let clangClass = "OMPThreadsClause";
}
+def OMPC_Threadset : Clause<[Spelling<"threadset">]> {
+ let clangClass = "OMPThreadsetClause";
+ let flangClass = "OmpThreadsetClause";
+}
def OMPC_To : Clause<[Spelling<"to">]> {
let clangClass = "OMPToClause";
let flangClass = "OmpToClause";
@@ -1254,6 +1258,7 @@ def OMP_Task : Directive<[Spelling<"task">]> {
VersionedClause<OMPC_Final>,
VersionedClause<OMPC_If>,
VersionedClause<OMPC_Priority>,
+ VersionedClause<OMPC_Threadset, 60>,
VersionedClause<OMPC_Replayable, 60>,
VersionedClause<OMPC_Transparent, 60>,
];
@@ -1297,6 +1302,7 @@ def OMP_TaskLoop : Directive<[Spelling<"taskloop">]> {
VersionedClause<OMPC_Final>,
VersionedClause<OMPC_If>,
VersionedClause<OMPC_Priority>,
+ VersionedClause<OMPC_Threadset, 60>,
VersionedClause<OMPC_Replayable, 60>,
VersionedClause<OMPC_Transparent, 60>,
];
diff --git a/llvm/include/llvm/Support/GenericLoopInfo.h b/llvm/include/llvm/Support/GenericLoopInfo.h
index 2775a87..b6bb360 100644
--- a/llvm/include/llvm/Support/GenericLoopInfo.h
+++ b/llvm/include/llvm/Support/GenericLoopInfo.h
@@ -615,6 +615,17 @@ public:
return L ? L->getLoopDepth() : 0;
}
+ /// \brief Find the innermost loop containing both given loops.
+ ///
+ /// \returns the innermost loop containing both \p A and \p B
+ /// or nullptr if there is no such loop.
+ LoopT *getSmallestCommonLoop(LoopT *A, LoopT *B) const;
+ /// \brief Find the innermost loop containing both given blocks.
+ ///
+ /// \returns the innermost loop containing both \p A and \p B
+ /// or nullptr if there is no such loop.
+ LoopT *getSmallestCommonLoop(BlockT *A, BlockT *B) const;
+
// True if the block is a loop header node
bool isLoopHeader(const BlockT *BB) const {
const LoopT *L = getLoopFor(BB);
diff --git a/llvm/include/llvm/Support/GenericLoopInfoImpl.h b/llvm/include/llvm/Support/GenericLoopInfoImpl.h
index 6fc508b..5416780 100644
--- a/llvm/include/llvm/Support/GenericLoopInfoImpl.h
+++ b/llvm/include/llvm/Support/GenericLoopInfoImpl.h
@@ -355,7 +355,7 @@ void LoopBase<BlockT, LoopT>::verifyLoop() const {
if (BB == getHeader()) {
assert(!OutsideLoopPreds.empty() && "Loop is unreachable!");
} else if (!OutsideLoopPreds.empty()) {
- // A non-header loop shouldn't be reachable from outside the loop,
+ // A non-header loop block shouldn't be reachable from outside the loop,
// though it is permitted if the predecessor is not itself actually
// reachable.
BlockT *EntryBB = &BB->getParent()->front();
@@ -645,6 +645,36 @@ LoopInfoBase<BlockT, LoopT>::getLoopsInReverseSiblingPreorder() const {
return PreOrderLoops;
}
+template <class BlockT, class LoopT>
+LoopT *LoopInfoBase<BlockT, LoopT>::getSmallestCommonLoop(LoopT *A,
+ LoopT *B) const {
+ if (!A || !B)
+ return nullptr;
+
+ // If loops A and B have different depths, replace them with their parent
+ // loops until they have the same depth.
+ while (A->getLoopDepth() > B->getLoopDepth())
+ A = A->getParentLoop();
+ while (B->getLoopDepth() > A->getLoopDepth())
+ B = B->getParentLoop();
+
+ // Loops A and B are at the same depth but may be disjoint; replace them with
+ // their parent loops until we find a loop that contains both or we run out
+ // of parent loops.
+ while (A != B) {
+ A = A->getParentLoop();
+ B = B->getParentLoop();
+ }
+
+ return A;
+}
+
+template <class BlockT, class LoopT>
+LoopT *LoopInfoBase<BlockT, LoopT>::getSmallestCommonLoop(BlockT *A,
+ BlockT *B) const {
+ return getSmallestCommonLoop(getLoopFor(A), getLoopFor(B));
+}
+
// Debugging
template <class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
diff --git a/llvm/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h b/llvm/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h
index ced446d..9dcd4b5 100644
--- a/llvm/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h
+++ b/llvm/include/llvm/Transforms/Instrumentation/PGOInstrumentation.h
@@ -26,8 +26,6 @@
namespace llvm {
-LLVM_ABI extern cl::opt<bool> DebugInfoCorrelate;
-
class Function;
class Instruction;
class Module;
diff --git a/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h b/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
index e677cbf..49885b7 100644
--- a/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CycleInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Printable.h"
@@ -262,6 +263,34 @@ LLVM_ABI BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
MemorySSAUpdater *MSSAU = nullptr,
const Twine &BBName = "");
+/// \brief Create a new intermediate target block for a callbr edge.
+///
+/// Create a new basic block between a callbr instruction and one of its
+/// successors. The new block replaces the original successor in the callbr
+/// instruction and unconditionally branches to the original successor. This
+/// is useful for normalizing control flow, e.g., when transforming
+/// irreducible loops.
+///
+/// \param CallBrBlock block containing the callbr instruction
+/// \param Succ original successor block
+/// \param SuccIdx index of the original successor in the callbr
+/// instruction
+/// \param DTU optional \p DomTreeUpdater for updating the
+/// dominator tree
+/// \param CI optional \p CycleInfo for updating cycle membership
+/// \param LI optional \p LoopInfo for updating loop membership
+/// \param UpdatedLI optional output flag indicating if \p LoopInfo has
+/// been updated
+///
+/// \returns newly created intermediate target block
+///
+/// \note This function updates PHI nodes, dominator tree, loop info, and
+/// cycle info as needed.
+LLVM_ABI BasicBlock *
+SplitCallBrEdge(BasicBlock *CallBrBlock, BasicBlock *Succ, unsigned SuccIdx,
+ DomTreeUpdater *DTU = nullptr, CycleInfo *CI = nullptr,
+ LoopInfo *LI = nullptr, bool *UpdatedLI = nullptr);
+
/// Sets the unwind edge of an instruction to a particular successor.
LLVM_ABI void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ);
diff --git a/llvm/include/llvm/Transforms/Utils/ControlFlowUtils.h b/llvm/include/llvm/Transforms/Utils/ControlFlowUtils.h
index 810fef2..17cde82 100644
--- a/llvm/include/llvm/Transforms/Utils/ControlFlowUtils.h
+++ b/llvm/include/llvm/Transforms/Utils/ControlFlowUtils.h
@@ -15,10 +15,13 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/CycleInfo.h"
namespace llvm {
class BasicBlock;
+class CallBrInst;
+class LoopInfo;
class DomTreeUpdater;
/// Given a set of branch descriptors [BB, Succ0, Succ1], create a "hub" such
@@ -104,7 +107,8 @@ struct ControlFlowHub {
: BB(BB), Succ0(Succ0), Succ1(Succ1) {}
};
- void addBranch(BasicBlock *BB, BasicBlock *Succ0, BasicBlock *Succ1) {
+ void addBranch(BasicBlock *BB, BasicBlock *Succ0,
+ BasicBlock *Succ1 = nullptr) {
assert(BB);
assert(Succ0 || Succ1);
Branches.emplace_back(BB, Succ0, Succ1);
diff --git a/llvm/lib/Analysis/DependenceAnalysis.cpp b/llvm/lib/Analysis/DependenceAnalysis.cpp
index 84ee8c0..11d8294 100644
--- a/llvm/lib/Analysis/DependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -2854,14 +2854,18 @@ bool DependenceInfo::testMIV(const SCEV *Src, const SCEV *Dst,
banerjeeMIVtest(Src, Dst, Loops, Result);
}
-// Given a product, e.g., 10*X*Y, returns the first constant operand,
-// in this case 10. If there is no constant part, returns std::nullopt.
-static std::optional<APInt> getConstantPart(const SCEV *Expr) {
+/// Given a SCEVMulExpr, returns its first operand if its first operand is a
+/// constant and the product doesn't overflow in a signed sense. Otherwise,
+/// returns std::nullopt. For example, given (10 * X * Y)<nsw>, it returns 10.
+/// Notably, if it doesn't have nsw, the multiplication may overflow, and if
+/// so, it may not be a multiple of 10.
+static std::optional<APInt> getConstanCoefficient(const SCEV *Expr) {
if (const auto *Constant = dyn_cast<SCEVConstant>(Expr))
return Constant->getAPInt();
if (const auto *Product = dyn_cast<SCEVMulExpr>(Expr))
if (const auto *Constant = dyn_cast<SCEVConstant>(Product->getOperand(0)))
- return Constant->getAPInt();
+ if (Product->hasNoSignedWrap())
+ return Constant->getAPInt();
return std::nullopt;
}
@@ -2887,7 +2891,7 @@ bool DependenceInfo::accumulateCoefficientsGCD(const SCEV *Expr,
if (AddRec->getLoop() == CurLoop) {
CurLoopCoeff = Step;
} else {
- std::optional<APInt> ConstCoeff = getConstantPart(Step);
+ std::optional<APInt> ConstCoeff = getConstanCoefficient(Step);
// If the coefficient is the product of a constant and other stuff, we can
// use the constant in the GCD computation.
@@ -2940,7 +2944,7 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
// If the coefficient is the product of a constant and other stuff,
// we can use the constant in the GCD computation.
- std::optional<APInt> ConstCoeff = getConstantPart(Coeff);
+ std::optional<APInt> ConstCoeff = getConstanCoefficient(Coeff);
if (!ConstCoeff)
return false;
RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff->abs());
@@ -2958,7 +2962,7 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
// If the coefficient is the product of a constant and other stuff,
// we can use the constant in the GCD computation.
- std::optional<APInt> ConstCoeff = getConstantPart(Coeff);
+ std::optional<APInt> ConstCoeff = getConstanCoefficient(Coeff);
if (!ConstCoeff)
return false;
RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff->abs());
@@ -2979,7 +2983,7 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
} else if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Operand)) {
// Search for constant operand to participate in GCD;
// If none found; return false.
- std::optional<APInt> ConstOp = getConstantPart(Product);
+ std::optional<APInt> ConstOp = getConstanCoefficient(Product);
if (!ConstOp)
return false;
ExtraGCD = APIntOps::GreatestCommonDivisor(ExtraGCD, ConstOp->abs());
@@ -3032,7 +3036,7 @@ bool DependenceInfo::gcdMIVtest(const SCEV *Src, const SCEV *Dst,
Delta = SE->getMinusSCEV(SrcCoeff, DstCoeff);
// If the coefficient is the product of a constant and other stuff,
// we can use the constant in the GCD computation.
- std::optional<APInt> ConstCoeff = getConstantPart(Delta);
+ std::optional<APInt> ConstCoeff = getConstanCoefficient(Delta);
if (!ConstCoeff)
// The difference of the two coefficients might not be a product
// or constant, in which case we give up on this direction.
diff --git a/llvm/lib/Frontend/Driver/CodeGenOptions.cpp b/llvm/lib/Frontend/Driver/CodeGenOptions.cpp
index df88490..b546e81 100644
--- a/llvm/lib/Frontend/Driver/CodeGenOptions.cpp
+++ b/llvm/lib/Frontend/Driver/CodeGenOptions.cpp
@@ -12,7 +12,6 @@
#include "llvm/TargetParser/Triple.h"
namespace llvm {
-extern llvm::cl::opt<bool> DebugInfoCorrelate;
extern llvm::cl::opt<llvm::InstrProfCorrelator::ProfCorrelatorKind>
ProfileCorrelate;
} // namespace llvm
@@ -64,8 +63,7 @@ TargetLibraryInfoImpl *createTLII(const llvm::Triple &TargetTriple,
}
std::string getDefaultProfileGenName() {
- return llvm::DebugInfoCorrelate ||
- llvm::ProfileCorrelate != InstrProfCorrelator::NONE
+ return llvm::ProfileCorrelate != InstrProfCorrelator::NONE
? "default_%m.proflite"
: "default_%m.profraw";
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 9ce1224..aed325c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -221,12 +221,22 @@ bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
bool AMDGPUInstructionSelector::selectCOPY_SCC_VCC(MachineInstr &I) const {
const DebugLoc &DL = I.getDebugLoc();
MachineBasicBlock *BB = I.getParent();
+ Register VCCReg = I.getOperand(1).getReg();
+ MachineInstr *Cmp;
+
+ if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
+ unsigned CmpOpc =
+ STI.isWave64() ? AMDGPU::S_CMP_LG_U64 : AMDGPU::S_CMP_LG_U32;
+ Cmp = BuildMI(*BB, &I, DL, TII.get(CmpOpc)).addReg(VCCReg).addImm(0);
+ } else {
+ // For gfx7 and earlier, S_CMP_LG_U64 doesn't exist, so we use S_OR_B64
+ // which sets SCC as a side effect.
+ Register DeadDst = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
+ Cmp = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_OR_B64), DeadDst)
+ .addReg(VCCReg)
+ .addReg(VCCReg);
+ }
- unsigned CmpOpc =
- STI.isWave64() ? AMDGPU::S_CMP_LG_U64 : AMDGPU::S_CMP_LG_U32;
- MachineInstr *Cmp = BuildMI(*BB, &I, DL, TII.get(CmpOpc))
- .addReg(I.getOperand(1).getReg())
- .addImm(0);
if (!constrainSelectedInstRegOperands(*Cmp, TII, TRI, RBI))
return false;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
index 5407566..b84c30e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
@@ -500,6 +500,16 @@ void RegBankLegalizeHelper::lowerUnpackMinMax(MachineInstr &MI) {
MI.eraseFromParent();
}
+void RegBankLegalizeHelper::lowerUnpackAExt(MachineInstr &MI) {
+ auto [Op1Lo, Op1Hi] = unpackAExt(MI.getOperand(1).getReg());
+ auto [Op2Lo, Op2Hi] = unpackAExt(MI.getOperand(2).getReg());
+ auto ResLo = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Op1Lo, Op2Lo});
+ auto ResHi = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Op1Hi, Op2Hi});
+ B.buildBuildVectorTrunc(MI.getOperand(0).getReg(),
+ {ResLo.getReg(0), ResHi.getReg(0)});
+ MI.eraseFromParent();
+}
+
static bool isSignedBFE(MachineInstr &MI) {
if (GIntrinsic *GI = dyn_cast<GIntrinsic>(&MI))
return (GI->is(Intrinsic::amdgcn_sbfe));
@@ -804,6 +814,8 @@ void RegBankLegalizeHelper::lower(MachineInstr &MI,
}
break;
}
+ case UnpackAExt:
+ return lowerUnpackAExt(MI);
case WidenMMOToS32:
return widenMMOToS32(cast<GAnyLoad>(MI));
}
@@ -1120,7 +1132,8 @@ void RegBankLegalizeHelper::applyMappingDst(
assert(RB == SgprRB);
Register NewDst = MRI.createVirtualRegister(SgprRB_S32);
Op.setReg(NewDst);
- B.buildTrunc(Reg, NewDst);
+ if (!MRI.use_empty(Reg))
+ B.buildTrunc(Reg, NewDst);
break;
}
case InvalidMapping: {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
index d937815..ad3ff1d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
@@ -124,6 +124,7 @@ private:
void lowerSplitTo32Select(MachineInstr &MI);
void lowerSplitTo32SExtInReg(MachineInstr &MI);
void lowerUnpackMinMax(MachineInstr &MI);
+ void lowerUnpackAExt(MachineInstr &MI);
};
} // end namespace AMDGPU
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
index a67b12a..01abd35 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
@@ -470,7 +470,19 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
.Uni(S16, {{Sgpr32Trunc}, {Sgpr32AExt, Sgpr32AExt}})
.Div(S16, {{Vgpr16}, {Vgpr16, Vgpr16}})
.Uni(S32, {{Sgpr32}, {Sgpr32, Sgpr32}})
- .Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}});
+ .Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}})
+ .Uni(V2S16, {{SgprV2S16}, {SgprV2S16, SgprV2S16}, UnpackAExt})
+ .Div(V2S16, {{VgprV2S16}, {VgprV2S16, VgprV2S16}})
+ .Uni(S64, {{Sgpr64}, {Sgpr64, Sgpr64}})
+ .Div(S64, {{Vgpr64}, {Vgpr64, Vgpr64}});
+
+ addRulesForGOpcs({G_UADDO, G_USUBO}, Standard)
+ .Uni(S32, {{Sgpr32, Sgpr32Trunc}, {Sgpr32, Sgpr32}})
+ .Div(S32, {{Vgpr32, Vcc}, {Vgpr32, Vgpr32}});
+
+ addRulesForGOpcs({G_UADDE, G_USUBE}, Standard)
+ .Uni(S32, {{Sgpr32, Sgpr32Trunc}, {Sgpr32, Sgpr32, Sgpr32AExtBoolInReg}})
+ .Div(S32, {{Vgpr32, Vcc}, {Vgpr32, Vgpr32, Vcc}});
addRulesForGOpcs({G_MUL}, Standard).Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}});
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
index 93e0efd..030bd75 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
@@ -223,7 +223,8 @@ enum LoweringMethodID {
UniCstExt,
SplitLoad,
WidenLoad,
- WidenMMOToS32
+ WidenMMOToS32,
+ UnpackAExt
};
enum FastRulesTypes {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 75a94ac..b28c50e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1315,6 +1315,9 @@ void AMDGPUPassConfig::addIRPasses() {
isPassEnabled(EnableImageIntrinsicOptimizer))
addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));
+ if (EnableUniformIntrinsicCombine)
+ addPass(createAMDGPUUniformIntrinsicCombineLegacyPass());
+
// This can be disabled by passing ::Disable here or on the command line
// with --expand-variadics-override=disable.
addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));
@@ -2066,6 +2069,8 @@ void AMDGPUCodeGenPassBuilder::addIRPasses(AddIRPass &addPass) const {
if (isPassEnabled(EnableImageIntrinsicOptimizer))
addPass(AMDGPUImageIntrinsicOptimizerPass(TM));
+ if (EnableUniformIntrinsicCombine)
+ addPass(AMDGPUUniformIntrinsicCombinePass());
// This can be disabled by passing ::Disable here or on the command line
// with --expand-variadics-override=disable.
addPass(ExpandVariadicsPass(ExpandVariadicsMode::Lowering));
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index b34ab2a..8bb2808 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7035,9 +7035,15 @@ static SDValue lowerBALLOTIntrinsic(const SITargetLowering &TLI, SDNode *N,
SDLoc SL(N);
if (Src.getOpcode() == ISD::SETCC) {
+ SDValue Op0 = Src.getOperand(0);
+ SDValue Op1 = Src.getOperand(1);
+ // Need to expand bfloat to float for comparison (setcc).
+ if (Op0.getValueType() == MVT::bf16) {
+ Op0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Op0);
+ Op1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Op1);
+ }
// (ballot (ISD::SETCC ...)) -> (AMDGPUISD::SETCC ...)
- return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src.getOperand(0),
- Src.getOperand(1), Src.getOperand(2));
+ return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Op0, Op1, Src.getOperand(2));
}
if (const ConstantSDNode *Arg = dyn_cast<ConstantSDNode>(Src)) {
// (ballot 0) -> 0
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index a4d3d62..6b06534 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -22109,6 +22109,11 @@ bool ARMTargetLowering::isComplexDeinterleavingOperationSupported(
ScalarTy->isIntegerTy(32));
}
+ArrayRef<MCPhysReg> ARMTargetLowering::getRoundingControlRegisters() const {
+ static const MCPhysReg RCRegs[] = {ARM::FPSCR_RM};
+ return RCRegs;
+}
+
Value *ARMTargetLowering::createComplexDeinterleavingIR(
IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 357d2c5..bf3438b 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -1009,6 +1009,8 @@ class VectorType;
bool isUnsupportedFloatingType(EVT VT) const;
+ ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;
+
SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
SDValue ARMcc, SDValue Flags, SelectionDAG &DAG) const;
SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index b9cdd6a..ce2de75 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -544,7 +544,7 @@ int HexagonSubtarget::updateLatency(MachineInstr &SrcInst,
if (!hasV60Ops())
return Latency;
- auto &QII = static_cast<const HexagonInstrInfo &>(*getInstrInfo());
+ const HexagonInstrInfo &QII = *getInstrInfo();
// BSB scheduling.
if (QII.isHVXVec(SrcInst) || useBSBScheduling())
Latency = (Latency + 1) >> 1;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 9a6afa1..b25a054 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3995,6 +3995,7 @@ bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits,
case RISCV::CTZW:
case RISCV::CPOPW:
case RISCV::SLLI_UW:
+ case RISCV::ABSW:
case RISCV::FMV_W_X:
case RISCV::FCVT_H_W:
case RISCV::FCVT_H_W_INX:
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 1c930ac..56881f7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -433,6 +433,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
if (Subtarget.hasStdExtP() ||
(Subtarget.hasVendorXCValu() && !Subtarget.is64Bit())) {
setOperationAction(ISD::ABS, XLenVT, Legal);
+ if (Subtarget.is64Bit())
+ setOperationAction(ISD::ABS, MVT::i32, Custom);
} else if (Subtarget.hasShortForwardBranchOpt()) {
// We can use PseudoCCSUB to implement ABS.
setOperationAction(ISD::ABS, XLenVT, Legal);
@@ -14816,8 +14818,16 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
"Unexpected custom legalisation");
+ if (Subtarget.hasStdExtP()) {
+ SDValue Src =
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
+ SDValue Abs = DAG.getNode(RISCVISD::ABSW, DL, MVT::i64, Src);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Abs));
+ return;
+ }
+
if (Subtarget.hasStdExtZbb()) {
- // Emit a special ABSW node that will be expanded to NEGW+MAX at isel.
+ // Emit a special node that will be expanded to NEGW+MAX at isel.
// This allows us to remember that the result is sign extended. Expanding
// to NEGW+MAX here requires a Freeze which breaks ComputeNumSignBits.
SDValue Src = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64,
@@ -20290,6 +20300,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
break;
}
+ case RISCVISD::ABSW:
case RISCVISD::CLZW:
case RISCVISD::CTZW: {
// Only the lower 32 bits of the first operand are read
@@ -21862,6 +21873,7 @@ unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
case RISCVISD::REMUW:
case RISCVISD::ROLW:
case RISCVISD::RORW:
+ case RISCVISD::ABSW:
case RISCVISD::FCVT_W_RV64:
case RISCVISD::FCVT_WU_RV64:
case RISCVISD::STRICT_FCVT_W_RV64:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
index cc085bb..4cbbba3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
@@ -1461,5 +1461,10 @@ let Predicates = [HasStdExtP, IsRV32] in {
// Codegen patterns
//===----------------------------------------------------------------------===//
+def riscv_absw : RVSDNode<"ABSW", SDTIntUnaryOp>;
+
let Predicates = [HasStdExtP] in
def : PatGpr<abs, ABS>;
+
+let Predicates = [HasStdExtP, IsRV64] in
+def : PatGpr<riscv_absw, ABSW>;
diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
index d08115b..ea98cdb 100644
--- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
+++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
@@ -172,6 +172,7 @@ static bool hasAllNBitUsers(const MachineInstr &OrigMI,
case RISCV::CTZW:
case RISCV::CPOPW:
case RISCV::SLLI_UW:
+ case RISCV::ABSW:
case RISCV::FMV_W_X:
case RISCV::FCVT_H_W:
case RISCV::FCVT_H_W_INX:
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 624cff2..49beada 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -48778,10 +48778,9 @@ static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
SDValue BC0 = peekThroughBitcasts(Op0);
if (BC0.getOpcode() == X86ISD::PCMPEQ &&
ISD::isBuildVectorAllZeros(BC0.getOperand(1).getNode())) {
- SDLoc DL(EFLAGS);
CC = (CC == X86::COND_B ? X86::COND_E : X86::COND_NE);
- SDValue X = DAG.getBitcast(OpVT, BC0.getOperand(0));
- return DAG.getNode(EFLAGS.getOpcode(), DL, VT, X, X);
+ SDValue X = DAG.getBitcast(OpVT, DAG.getFreeze(BC0.getOperand(0)));
+ return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, X, X);
}
}
}
@@ -48837,7 +48836,7 @@ static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
MVT FloatSVT = MVT::getFloatingPointVT(EltBits);
MVT FloatVT =
MVT::getVectorVT(FloatSVT, OpVT.getSizeInBits() / EltBits);
- Res = DAG.getBitcast(FloatVT, Res);
+ Res = DAG.getBitcast(FloatVT, DAG.getFreeze(Res));
return DAG.getNode(X86ISD::TESTP, SDLoc(EFLAGS), VT, Res, Res);
} else if (EltBits == 16) {
MVT MovmskVT = BCVT.is128BitVector() ? MVT::v16i8 : MVT::v32i8;
@@ -48856,8 +48855,30 @@ static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
}
// TESTZ(X,-1) == TESTZ(X,X)
- if (ISD::isBuildVectorAllOnes(Op1.getNode()))
+ if (ISD::isBuildVectorAllOnes(Op1.getNode())) {
+ Op0 = DAG.getFreeze(Op0);
return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op0, Op0);
+ }
+
+ // Attempt to convert PTESTZ(X,SIGNMASK) -> VTESTPD/PSZ(X,X) on AVX targets.
+ if (EFLAGS.getOpcode() == X86ISD::PTEST && Subtarget.hasAVX()) {
+ KnownBits KnownOp1 = DAG.computeKnownBits(Op1);
+ assert(KnownOp1.getBitWidth() == 64 &&
+ "Illegal PTEST vector element width");
+ if (KnownOp1.isConstant()) {
+ const APInt &Mask = KnownOp1.getConstant();
+ if (Mask.isSignMask()) {
+ MVT FpVT = MVT::getVectorVT(MVT::f64, OpVT.getSizeInBits() / 64);
+ Op0 = DAG.getBitcast(FpVT, DAG.getFreeze(Op0));
+ return DAG.getNode(X86ISD::TESTP, SDLoc(EFLAGS), VT, Op0, Op0);
+ }
+ if (Mask.isSplat(32) && Mask.trunc(32).isSignMask()) {
+ MVT FpVT = MVT::getVectorVT(MVT::f32, OpVT.getSizeInBits() / 32);
+ Op0 = DAG.getBitcast(FpVT, DAG.getFreeze(Op0));
+ return DAG.getNode(X86ISD::TESTP, SDLoc(EFLAGS), VT, Op0, Op0);
+ }
+ }
+ }
// TESTZ(OR(LO(X),HI(X)),OR(LO(Y),HI(Y))) -> TESTZ(X,Y)
// TODO: Add COND_NE handling?
@@ -53480,6 +53501,80 @@ static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+// Look for a RMW operation that only touches one bit of a larger than legal
+// type and fold it to a BTC/BTR/BTS pattern acting on a single i32 sub value.
+static SDValue narrowBitOpRMW(StoreSDNode *St, const SDLoc &DL,
+ SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ using namespace SDPatternMatch;
+
+ // Only handle normal stores whose chain is a matching normal load.
+ auto *Ld = dyn_cast<LoadSDNode>(St->getChain());
+ if (!ISD::isNormalStore(St) || !St->isSimple() || !Ld ||
+ !ISD::isNormalLoad(Ld) || !Ld->isSimple() ||
+ Ld->getBasePtr() != St->getBasePtr() ||
+ Ld->getOffset() != St->getOffset())
+ return SDValue();
+
+ SDValue LoadVal(Ld, 0);
+ SDValue StoredVal = St->getValue();
+ EVT VT = StoredVal.getValueType();
+
+ // Only narrow larger than legal scalar integers.
+ if (!VT.isScalarInteger() ||
+ VT.getSizeInBits() <= (Subtarget.is64Bit() ? 64 : 32))
+ return SDValue();
+
+ // BTR: X & ~(1 << ShAmt)
+ // BTS: X | (1 << ShAmt)
+ // BTC: X ^ (1 << ShAmt)
+ SDValue ShAmt;
+ if (!StoredVal.hasOneUse() ||
+ !(sd_match(StoredVal, m_And(m_Specific(LoadVal),
+ m_Not(m_Shl(m_One(), m_Value(ShAmt))))) ||
+ sd_match(StoredVal,
+ m_Or(m_Specific(LoadVal), m_Shl(m_One(), m_Value(ShAmt)))) ||
+ sd_match(StoredVal,
+ m_Xor(m_Specific(LoadVal), m_Shl(m_One(), m_Value(ShAmt))))))
+ return SDValue();
+
+ // Ensure the shift amount is in bounds.
+ KnownBits KnownAmt = DAG.computeKnownBits(ShAmt);
+ if (KnownAmt.getMaxValue().uge(VT.getSizeInBits()))
+ return SDValue();
+
+ // Split the shift into an alignment shift that moves the active i32 block to
+ // the bottom bits for truncation and a modulo shift that can act on the i32.
+ EVT AmtVT = ShAmt.getValueType();
+ SDValue AlignAmt = DAG.getNode(ISD::AND, DL, AmtVT, ShAmt,
+ DAG.getSignedConstant(-32LL, DL, AmtVT));
+ SDValue ModuloAmt =
+ DAG.getNode(ISD::AND, DL, AmtVT, ShAmt, DAG.getConstant(31, DL, AmtVT));
+
+ // Compute the byte offset for the i32 block that is changed by the RMW.
+ // combineTruncate will adjust the load for us in a similar way.
+ EVT PtrVT = St->getBasePtr().getValueType();
+ SDValue PtrBitOfs = DAG.getZExtOrTrunc(AlignAmt, DL, PtrVT);
+ SDValue PtrByteOfs = DAG.getNode(ISD::SRL, DL, PtrVT, PtrBitOfs,
+ DAG.getShiftAmountConstant(3, PtrVT, DL));
+ SDValue NewPtr = DAG.getMemBasePlusOffset(St->getBasePtr(), PtrByteOfs, DL,
+ SDNodeFlags::NoUnsignedWrap);
+
+ // Reconstruct the BTC/BTR/BTS pattern for the i32 block and store.
+ SDValue X = DAG.getNode(ISD::SRL, DL, VT, LoadVal, AlignAmt);
+ X = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
+
+ SDValue Mask =
+ DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(1, DL, MVT::i32),
+ DAG.getZExtOrTrunc(ModuloAmt, DL, MVT::i8));
+ if (StoredVal.getOpcode() == ISD::AND)
+ Mask = DAG.getNOT(DL, Mask, MVT::i32);
+
+ SDValue Res = DAG.getNode(StoredVal.getOpcode(), DL, MVT::i32, X, Mask);
+ return DAG.getStore(St->getChain(), DL, Res, NewPtr, St->getPointerInfo(),
+ Align(), St->getMemOperand()->getFlags());
+}
+
static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
@@ -53706,6 +53801,9 @@ static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
}
}
+ if (SDValue R = narrowBitOpRMW(St, dl, DAG, Subtarget))
+ return R;
+
// Convert store(cmov(load(p), x, CC), p) to cstore(x, p, CC)
// store(cmov(x, load(p), CC), p) to cstore(x, p, InvertCC)
if ((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
@@ -54660,8 +54758,9 @@ static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
// truncation, see if we can convert the shift into a pointer offset instead.
// Limit this to normal (non-ext) scalar integer loads.
if (SrcVT.isScalarInteger() && Src.getOpcode() == ISD::SRL &&
- Src.hasOneUse() && Src.getOperand(0).hasOneUse() &&
- ISD::isNormalLoad(Src.getOperand(0).getNode())) {
+ Src.hasOneUse() && ISD::isNormalLoad(Src.getOperand(0).getNode()) &&
+ (Src.getOperand(0).hasOneUse() ||
+ !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, SrcVT))) {
auto *Ld = cast<LoadSDNode>(Src.getOperand(0));
if (Ld->isSimple() && VT.isByteSized() &&
isPowerOf2_64(VT.getSizeInBits())) {
@@ -56459,6 +56558,7 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
+ using namespace SDPatternMatch;
const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
const SDValue LHS = N->getOperand(0);
const SDValue RHS = N->getOperand(1);
@@ -56517,6 +56617,37 @@ static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
if (SDValue AndN = MatchAndCmpEq(RHS, LHS))
return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
+ // If we're performing a bit test on a larger than legal type, attempt
+ // to (aligned) shift down the value to the bottom 32-bits and then
+ // perform the bittest on the i32 value.
+ // ICMP_ZERO(AND(X,SHL(1,IDX)))
+ // --> ICMP_ZERO(AND(TRUNC(SRL(X,AND(IDX,-32))),SHL(1,AND(IDX,31))))
+ if (isNullConstant(RHS) &&
+ OpVT.getScalarSizeInBits() > (Subtarget.is64Bit() ? 64 : 32)) {
+ SDValue X, ShAmt;
+ if (sd_match(LHS, m_OneUse(m_And(m_Value(X),
+ m_Shl(m_One(), m_Value(ShAmt)))))) {
+ // Only attempt this if the shift amount is known to be in bounds.
+ KnownBits KnownAmt = DAG.computeKnownBits(ShAmt);
+ if (KnownAmt.getMaxValue().ult(OpVT.getScalarSizeInBits())) {
+ EVT AmtVT = ShAmt.getValueType();
+ SDValue AlignAmt =
+ DAG.getNode(ISD::AND, DL, AmtVT, ShAmt,
+ DAG.getSignedConstant(-32LL, DL, AmtVT));
+ SDValue ModuloAmt = DAG.getNode(ISD::AND, DL, AmtVT, ShAmt,
+ DAG.getConstant(31, DL, AmtVT));
+ SDValue Mask = DAG.getNode(
+ ISD::SHL, DL, MVT::i32, DAG.getConstant(1, DL, MVT::i32),
+ DAG.getZExtOrTrunc(ModuloAmt, DL, MVT::i8));
+ X = DAG.getNode(ISD::SRL, DL, OpVT, X, AlignAmt);
+ X = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
+ X = DAG.getNode(ISD::AND, DL, MVT::i32, X, Mask);
+ return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, MVT::i32),
+ CC);
+ }
+ }
+ }
+
// cmpeq(trunc(x),C) --> cmpeq(x,C)
// cmpne(trunc(x),C) --> cmpne(x,C)
// iff x upper bits are zero.
diff --git a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
index 7795cce..b5548d4 100644
--- a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -69,14 +69,6 @@ namespace llvm {
// Command line option to enable vtable value profiling. Defined in
// ProfileData/InstrProf.cpp: -enable-vtable-value-profiling=
extern cl::opt<bool> EnableVTableValueProfiling;
-// TODO: Remove -debug-info-correlate in next LLVM release, in favor of
-// -profile-correlate=debug-info.
-cl::opt<bool> DebugInfoCorrelate(
- "debug-info-correlate",
- cl::desc("Use debug info to correlate profiles. (Deprecated, use "
- "-profile-correlate=debug-info)"),
- cl::init(false));
-
LLVM_ABI cl::opt<InstrProfCorrelator::ProfCorrelatorKind> ProfileCorrelate(
"profile-correlate",
cl::desc("Use debug info or binary file to correlate profiles."),
@@ -1047,7 +1039,7 @@ void InstrLowerer::lowerValueProfileInst(InstrProfValueProfileInst *Ind) {
// in lightweight mode. We need to move the value profile pointer to the
// Counter struct to get this working.
assert(
- !DebugInfoCorrelate && ProfileCorrelate == InstrProfCorrelator::NONE &&
+ ProfileCorrelate == InstrProfCorrelator::NONE &&
"Value profiling is not yet supported with lightweight instrumentation");
GlobalVariable *Name = Ind->getName();
auto It = ProfileDataMap.find(Name);
@@ -1504,7 +1496,7 @@ static inline Constant *getVTableAddrForProfData(GlobalVariable *GV) {
}
void InstrLowerer::getOrCreateVTableProfData(GlobalVariable *GV) {
- assert(!DebugInfoCorrelate &&
+ assert(ProfileCorrelate != InstrProfCorrelator::DEBUG_INFO &&
"Value profiling is not supported with lightweight instrumentation");
if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
return;
@@ -1584,8 +1576,7 @@ GlobalVariable *InstrLowerer::setupProfileSection(InstrProfInstBase *Inc,
// Use internal rather than private linkage so the counter variable shows up
// in the symbol table when using debug info for correlation.
- if ((DebugInfoCorrelate ||
- ProfileCorrelate == InstrProfCorrelator::DEBUG_INFO) &&
+ if (ProfileCorrelate == InstrProfCorrelator::DEBUG_INFO &&
TT.isOSBinFormatMachO() && Linkage == GlobalValue::PrivateLinkage)
Linkage = GlobalValue::InternalLinkage;
@@ -1691,8 +1682,7 @@ InstrLowerer::getOrCreateRegionCounters(InstrProfCntrInstBase *Inc) {
auto *CounterPtr = setupProfileSection(Inc, IPSK_cnts);
PD.RegionCounters = CounterPtr;
- if (DebugInfoCorrelate ||
- ProfileCorrelate == InstrProfCorrelator::DEBUG_INFO) {
+ if (ProfileCorrelate == InstrProfCorrelator::DEBUG_INFO) {
LLVMContext &Ctx = M.getContext();
Function *Fn = Inc->getParent()->getParent();
if (auto *SP = Fn->getSubprogram()) {
@@ -1737,7 +1727,7 @@ InstrLowerer::getOrCreateRegionCounters(InstrProfCntrInstBase *Inc) {
void InstrLowerer::createDataVariable(InstrProfCntrInstBase *Inc) {
// When debug information is correlated to profile data, a data variable
// is not needed.
- if (DebugInfoCorrelate || ProfileCorrelate == InstrProfCorrelator::DEBUG_INFO)
+ if (ProfileCorrelate == InstrProfCorrelator::DEBUG_INFO)
return;
GlobalVariable *NamePtr = Inc->getName();
diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index 71736cf..af53fa0 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -456,7 +456,7 @@ createIRLevelProfileFlagVar(Module &M,
ProfileVersion |= VARIANT_MASK_INSTR_ENTRY;
if (PGOInstrumentLoopEntries)
ProfileVersion |= VARIANT_MASK_INSTR_LOOP_ENTRIES;
- if (DebugInfoCorrelate || ProfileCorrelate == InstrProfCorrelator::DEBUG_INFO)
+ if (ProfileCorrelate == InstrProfCorrelator::DEBUG_INFO)
ProfileVersion |= VARIANT_MASK_DBG_CORRELATE;
if (PGOFunctionEntryCoverage)
ProfileVersion |=
diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 7ebcc21..4ba4ba3 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -162,8 +162,6 @@ class IndVarSimplify {
const SCEV *ExitCount,
PHINode *IndVar, SCEVExpander &Rewriter);
- bool sinkUnusedInvariants(Loop *L);
-
public:
IndVarSimplify(LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
const DataLayout &DL, TargetLibraryInfo *TLI,
@@ -1079,85 +1077,6 @@ linearFunctionTestReplace(Loop *L, BasicBlock *ExitingBB,
return true;
}
-//===----------------------------------------------------------------------===//
-// sinkUnusedInvariants. A late subpass to cleanup loop preheaders.
-//===----------------------------------------------------------------------===//
-
-/// If there's a single exit block, sink any loop-invariant values that
-/// were defined in the preheader but not used inside the loop into the
-/// exit block to reduce register pressure in the loop.
-bool IndVarSimplify::sinkUnusedInvariants(Loop *L) {
- BasicBlock *ExitBlock = L->getExitBlock();
- if (!ExitBlock) return false;
-
- BasicBlock *Preheader = L->getLoopPreheader();
- if (!Preheader) return false;
-
- bool MadeAnyChanges = false;
- for (Instruction &I : llvm::make_early_inc_range(llvm::reverse(*Preheader))) {
-
- // Skip BB Terminator.
- if (Preheader->getTerminator() == &I)
- continue;
-
- // New instructions were inserted at the end of the preheader.
- if (isa<PHINode>(I))
- break;
-
- // Don't move instructions which might have side effects, since the side
- // effects need to complete before instructions inside the loop. Also don't
- // move instructions which might read memory, since the loop may modify
- // memory. Note that it's okay if the instruction might have undefined
- // behavior: LoopSimplify guarantees that the preheader dominates the exit
- // block.
- if (I.mayHaveSideEffects() || I.mayReadFromMemory())
- continue;
-
- // Skip debug or pseudo instructions.
- if (I.isDebugOrPseudoInst())
- continue;
-
- // Skip eh pad instructions.
- if (I.isEHPad())
- continue;
-
- // Don't sink alloca: we never want to sink static alloca's out of the
- // entry block, and correctly sinking dynamic alloca's requires
- // checks for stacksave/stackrestore intrinsics.
- // FIXME: Refactor this check somehow?
- if (isa<AllocaInst>(&I))
- continue;
-
- // Determine if there is a use in or before the loop (direct or
- // otherwise).
- bool UsedInLoop = false;
- for (Use &U : I.uses()) {
- Instruction *User = cast<Instruction>(U.getUser());
- BasicBlock *UseBB = User->getParent();
- if (PHINode *P = dyn_cast<PHINode>(User)) {
- unsigned i =
- PHINode::getIncomingValueNumForOperand(U.getOperandNo());
- UseBB = P->getIncomingBlock(i);
- }
- if (UseBB == Preheader || L->contains(UseBB)) {
- UsedInLoop = true;
- break;
- }
- }
-
- // If there is, the def must remain in the preheader.
- if (UsedInLoop)
- continue;
-
- // Otherwise, sink it to the exit block.
- I.moveBefore(ExitBlock->getFirstInsertionPt());
- SE->forgetValue(&I);
- MadeAnyChanges = true;
- }
-
- return MadeAnyChanges;
-}
-
static void replaceExitCond(BranchInst *BI, Value *NewCond,
SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
auto *OldCond = BI->getCondition();
@@ -2065,10 +1984,6 @@ bool IndVarSimplify::run(Loop *L) {
// The Rewriter may not be used from this point on.
- // Loop-invariant instructions in the preheader that aren't used in the
- // loop may be sunk below the loop to reduce register pressure.
- Changed |= sinkUnusedInvariants(L);
-
// rewriteFirstIterationLoopExitValues does not rely on the computation of
// trip count and therefore can further simplify exit values in addition to
// rewriteLoopExitValues.
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index b2c526b..d13b990 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -211,9 +211,15 @@ static Instruction *cloneInstructionInExitBlock(
static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
MemorySSAUpdater &MSSAU);
-static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
- ICFLoopSafetyInfo &SafetyInfo,
- MemorySSAUpdater &MSSAU, ScalarEvolution *SE);
+static void moveInstructionBefore(
+ Instruction &I, BasicBlock::iterator Dest, ICFLoopSafetyInfo &SafetyInfo,
+ MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
+ MemorySSA::InsertionPlace Point = MemorySSA::BeforeTerminator);
+
+static bool sinkUnusedInvariantsFromPreheaderToExit(
+ Loop *L, AAResults *AA, ICFLoopSafetyInfo *SafetyInfo,
+ MemorySSAUpdater &MSSAU, ScalarEvolution *SE, DominatorTree *DT,
+ SinkAndHoistLICMFlags &SinkFlags, OptimizationRemarkEmitter *ORE);
static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
function_ref<void(Instruction *)> Fn);
@@ -471,6 +477,12 @@ bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
TLI, TTI, L, MSSAU, &SafetyInfo, Flags, ORE)
: sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
MSSAU, &SafetyInfo, Flags, ORE);
+
+  // Sink preheader defs that are unused in-loop into the unique exit to
+  // reduce register pressure.
+ Changed |= sinkUnusedInvariantsFromPreheaderToExit(L, AA, &SafetyInfo, MSSAU,
+ SE, DT, Flags, ORE);
+
Flags.setIsSink(false);
if (Preheader)
Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, AC, TLI, L,
@@ -1456,19 +1468,80 @@ static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
ICFLoopSafetyInfo &SafetyInfo,
- MemorySSAUpdater &MSSAU,
- ScalarEvolution *SE) {
+ MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
+ MemorySSA::InsertionPlace Point) {
SafetyInfo.removeInstruction(&I);
SafetyInfo.insertInstructionTo(&I, Dest->getParent());
I.moveBefore(*Dest->getParent(), Dest);
if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
MSSAU.getMemorySSA()->getMemoryAccess(&I)))
- MSSAU.moveToPlace(OldMemAcc, Dest->getParent(),
- MemorySSA::BeforeTerminator);
+ MSSAU.moveToPlace(OldMemAcc, Dest->getParent(), Point);
if (SE)
SE->forgetBlockAndLoopDispositions(&I);
}
+// If there's a single exit block, sink any loop-invariant values that were
+// defined in the preheader but not used inside the loop into the exit block
+// to reduce register pressure in the loop.
+static bool sinkUnusedInvariantsFromPreheaderToExit(
+ Loop *L, AAResults *AA, ICFLoopSafetyInfo *SafetyInfo,
+ MemorySSAUpdater &MSSAU, ScalarEvolution *SE, DominatorTree *DT,
+ SinkAndHoistLICMFlags &SinkFlags, OptimizationRemarkEmitter *ORE) {
+ BasicBlock *ExitBlock = L->getExitBlock();
+ if (!ExitBlock)
+ return false;
+
+ BasicBlock *Preheader = L->getLoopPreheader();
+ if (!Preheader)
+ return false;
+
+ bool MadeAnyChanges = false;
+
+ for (Instruction &I : llvm::make_early_inc_range(llvm::reverse(*Preheader))) {
+
+ // Skip terminator.
+ if (Preheader->getTerminator() == &I)
+ continue;
+
+ // New instructions were inserted at the end of the preheader.
+ if (isa<PHINode>(I))
+ break;
+
+ // Don't move instructions which might have side effects, since the side
+ // effects need to complete before instructions inside the loop. Note that
+ // it's okay if the instruction might have undefined behavior: LoopSimplify
+ // guarantees that the preheader dominates the exit block.
+ if (I.mayHaveSideEffects())
+ continue;
+
+ if (!canSinkOrHoistInst(I, AA, DT, L, MSSAU, true, SinkFlags, nullptr))
+ continue;
+
+ // Determine if there is a use in or before the loop (direct or
+ // otherwise).
+ bool UsedInLoopOrPreheader = false;
+ for (Use &U : I.uses()) {
+ auto *UserI = cast<Instruction>(U.getUser());
+ BasicBlock *UseBB = UserI->getParent();
+ if (auto *PN = dyn_cast<PHINode>(UserI)) {
+ UseBB = PN->getIncomingBlock(U);
+ }
+ if (UseBB == Preheader || L->contains(UseBB)) {
+ UsedInLoopOrPreheader = true;
+ break;
+ }
+ }
+ if (UsedInLoopOrPreheader)
+ continue;
+
+ moveInstructionBefore(I, ExitBlock->getFirstInsertionPt(), *SafetyInfo,
+ MSSAU, SE, MemorySSA::Beginning);
+ MadeAnyChanges = true;
+ }
+
+ return MadeAnyChanges;
+}
+
static Instruction *sinkThroughTriviallyReplaceablePHI(
PHINode *TPN, Instruction *I, LoopInfo *LI,
SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 1a279b6..001215a 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -1318,6 +1318,11 @@ public:
/// the loop, in which case some special-case heuristics may be used.
bool AllFixupsOutsideLoop = true;
+ /// This records whether all of the fixups using this LSRUse are unconditional
+ /// within the loop, meaning they will be executed on every path to the loop
+ /// latch. This includes fixups before early exits.
+ bool AllFixupsUnconditional = true;
+
/// RigidFormula is set to true to guarantee that this use will be associated
/// with a single formula--the one that initially matched. Some SCEV
/// expressions cannot be expanded. This allows LSR to consider the registers
@@ -1421,16 +1426,22 @@ void Cost::RateRegister(const Formula &F, const SCEV *Reg,
if (TTI->isIndexedLoadLegal(TTI->MIM_PostInc, AR->getType()) ||
TTI->isIndexedStoreLegal(TTI->MIM_PostInc, AR->getType())) {
const SCEV *Start;
- const SCEVConstant *Step;
- if (match(AR, m_scev_AffineAddRec(m_SCEV(Start), m_SCEVConstant(Step))))
+ const APInt *Step;
+ if (match(AR, m_scev_AffineAddRec(m_SCEV(Start), m_scev_APInt(Step)))) {
// If the step size matches the base offset, we could use pre-indexed
// addressing.
- if (((AMK & TTI::AMK_PreIndexed) && F.BaseOffset.isFixed() &&
- Step->getAPInt() == F.BaseOffset.getFixedValue()) ||
- ((AMK & TTI::AMK_PostIndexed) && !isa<SCEVConstant>(Start) &&
- SE->isLoopInvariant(Start, L)))
+ bool CanPreIndex = (AMK & TTI::AMK_PreIndexed) &&
+ F.BaseOffset.isFixed() &&
+ *Step == F.BaseOffset.getFixedValue();
+ bool CanPostIndex = (AMK & TTI::AMK_PostIndexed) &&
+ !isa<SCEVConstant>(Start) &&
+ SE->isLoopInvariant(Start, L);
+ // We can only pre or post index when the load/store is unconditional.
+ if ((CanPreIndex || CanPostIndex) && LU.AllFixupsUnconditional)
LoopCost = 0;
+ }
}
+
// If the loop counts down to zero and we'll be using a hardware loop then
// the addrec will be combined into the hardware loop instruction.
if (LU.Kind == LSRUse::ICmpZero && F.countsDownToZero() &&
@@ -1783,6 +1794,9 @@ void LSRUse::print(raw_ostream &OS) const {
if (AllFixupsOutsideLoop)
OS << ", all-fixups-outside-loop";
+ if (AllFixupsUnconditional)
+ OS << ", all-fixups-unconditional";
+
if (WidestFixupType)
OS << ", widest fixup type: " << *WidestFixupType;
}
@@ -2213,6 +2227,7 @@ class LSRInstance {
void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
void CountRegisters(const Formula &F, size_t LUIdx);
bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);
+ bool IsFixupExecutedEachIncrement(const LSRFixup &LF) const;
void CollectLoopInvariantFixupsAndFormulae();
@@ -3607,6 +3622,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
LF.PostIncLoops = TmpPostIncLoops;
LF.Offset = Offset;
LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
+ LU.AllFixupsUnconditional &= IsFixupExecutedEachIncrement(LF);
// Create SCEV as Formula for calculating baseline cost
if (!VisitedLSRUse.count(LUIdx) && !LF.isUseFullyOutsideLoop(L)) {
@@ -3680,6 +3696,14 @@ bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
return true;
}
+/// Test whether this fixup will be executed each time the corresponding IV
+/// increment instruction is executed.
+bool LSRInstance::IsFixupExecutedEachIncrement(const LSRFixup &LF) const {
+ // If the fixup block dominates the IV increment block then there is no path
+ // through the loop to the increment that doesn't pass through the fixup.
+ return DT.dominates(LF.UserInst->getParent(), IVIncInsertPos->getParent());
+}
+
/// Check for other uses of loop-invariant values which we're tracking. These
/// other uses will pin these values in registers, making them less profitable
/// for elimination.
@@ -3803,6 +3827,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
LF.OperandValToReplace = U;
LF.Offset = Offset;
LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
+ LU.AllFixupsUnconditional &= IsFixupExecutedEachIncrement(LF);
if (!LU.WidestFixupType ||
SE.getTypeSizeInBits(LU.WidestFixupType) <
SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
@@ -4940,6 +4965,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
LLVM_DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); dbgs() << '\n');
LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
+ LUThatHas->AllFixupsUnconditional &= LU.AllFixupsUnconditional;
// Transfer the fixups of LU to LUThatHas.
for (LSRFixup &Fixup : LU.Fixups) {
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index e043d07..08be5df 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -1534,8 +1534,8 @@ bool MemCpyOptPass::performStackMoveOptzn(Instruction *Load, Instruction *Store,
bool SrcNotDom = false;
auto CaptureTrackingWithModRef =
- [&](Instruction *AI,
- function_ref<bool(Instruction *)> ModRefCallback) -> bool {
+ [&](Instruction *AI, function_ref<bool(Instruction *)> ModRefCallback,
+ bool &AddressCaptured) -> bool {
SmallVector<Instruction *, 8> Worklist;
Worklist.push_back(AI);
unsigned MaxUsesToExplore = getDefaultMaxUsesToExploreForCaptureTracking();
@@ -1559,8 +1559,9 @@ bool MemCpyOptPass::performStackMoveOptzn(Instruction *Load, Instruction *Store,
if (!Visited.insert(&U).second)
continue;
UseCaptureInfo CI = DetermineUseCaptureKind(U, AI);
- if (capturesAnything(CI.UseCC))
+ if (capturesAnyProvenance(CI.UseCC))
return false;
+ AddressCaptured |= capturesAddress(CI.UseCC);
if (UI->mayReadOrWriteMemory()) {
if (UI->isLifetimeStartOrEnd()) {
@@ -1627,7 +1628,9 @@ bool MemCpyOptPass::performStackMoveOptzn(Instruction *Load, Instruction *Store,
return true;
};
- if (!CaptureTrackingWithModRef(DestAlloca, DestModRefCallback))
+ bool DestAddressCaptured = false;
+ if (!CaptureTrackingWithModRef(DestAlloca, DestModRefCallback,
+ DestAddressCaptured))
return false;
// Bailout if Dest may have any ModRef before Store.
if (!ReachabilityWorklist.empty() &&
@@ -1653,7 +1656,14 @@ bool MemCpyOptPass::performStackMoveOptzn(Instruction *Load, Instruction *Store,
return true;
};
- if (!CaptureTrackingWithModRef(SrcAlloca, SrcModRefCallback))
+ bool SrcAddressCaptured = false;
+ if (!CaptureTrackingWithModRef(SrcAlloca, SrcModRefCallback,
+ SrcAddressCaptured))
+ return false;
+
+ // If both the source and destination address are captured, the fact that they
+ // are no longer two separate allocations may be observed.
+ if (DestAddressCaptured && SrcAddressCaptured)
return false;
// We can do the transformation. First, move the SrcAlloca to the start of the
diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
index 5af6c96..bb6c879 100644
--- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp
@@ -81,6 +81,7 @@ STATISTIC(
STATISTIC(NumInvariantConditionsInjected,
"Number of invariant conditions injected and unswitched");
+namespace llvm {
static cl::opt<bool> EnableNonTrivialUnswitch(
"enable-nontrivial-unswitch", cl::init(false), cl::Hidden,
cl::desc("Forcibly enables non-trivial loop unswitching rather than "
@@ -131,11 +132,17 @@ static cl::opt<bool> InjectInvariantConditions(
static cl::opt<unsigned> InjectInvariantConditionHotnesThreshold(
"simple-loop-unswitch-inject-invariant-condition-hotness-threshold",
- cl::Hidden, cl::desc("Only try to inject loop invariant conditions and "
- "unswitch on them to eliminate branches that are "
- "not-taken 1/<this option> times or less."),
+ cl::Hidden,
+ cl::desc("Only try to inject loop invariant conditions and "
+ "unswitch on them to eliminate branches that are "
+ "not-taken 1/<this option> times or less."),
cl::init(16));
+static cl::opt<bool> EstimateProfile("simple-loop-unswitch-estimate-profile",
+ cl::Hidden, cl::init(true));
+extern cl::opt<bool> ProfcheckDisableMetadataFixes;
+} // namespace llvm
+
AnalysisKey ShouldRunExtraSimpleLoopUnswitch::Key;
namespace {
struct CompareDesc {
@@ -268,13 +275,42 @@ static bool areLoopExitPHIsLoopInvariant(const Loop &L,
llvm_unreachable("Basic blocks should never be empty!");
}
-/// Copy a set of loop invariant values \p ToDuplicate and insert them at the
+/// Copy a set of loop invariant values \p Invariants and insert them at the
/// end of \p BB and conditionally branch on the copied condition. We only
/// branch on a single value.
+/// We attempt to estimate the profile of the resulting conditional branch from
+/// \p ComputeProfFrom, which is the original conditional branch we're
+/// unswitching.
+/// When \p Direction is true, the \p Invariants form a disjunction, and the
+/// branch conditioned on it exits the loop on the "true" case. When \p
+/// Direction is false, the \p Invariants form a conjunction and the branch
+/// exits on the "false" case.
static void buildPartialUnswitchConditionalBranch(
BasicBlock &BB, ArrayRef<Value *> Invariants, bool Direction,
BasicBlock &UnswitchedSucc, BasicBlock &NormalSucc, bool InsertFreeze,
- const Instruction *I, AssumptionCache *AC, const DominatorTree &DT) {
+ const Instruction *I, AssumptionCache *AC, const DominatorTree &DT,
+ const BranchInst &ComputeProfFrom) {
+
+ SmallVector<uint32_t> BranchWeights;
+ bool HasBranchWeights = EstimateProfile && !ProfcheckDisableMetadataFixes &&
+ extractBranchWeights(ComputeProfFrom, BranchWeights);
+ // If Direction is true, that means we had a disjunction and that the "true"
+ // case exits. The probability of the disjunction of the subset of terms is at
+ // most as high as the original one. So, if the probability is higher than the
+ // one we'd assign in absence of a profile (i.e. 0.5), we will use 0.5,
+ // but if it's lower, we will use the original probability.
+ // Conversely, if Direction is false, that means we had a conjunction, and the
+ // probability of exiting is captured in the second branch weight. That
+ // probability is a disjunction (of the negation of the original terms). The
+ // same reasoning applies as above.
+ // Issue #165649: should we expect BFI to conserve, and use that to calculate
+ // the branch weights?
+ if (HasBranchWeights &&
+ static_cast<double>(BranchWeights[Direction ? 0 : 1]) /
+ static_cast<double>(sum_of(BranchWeights)) >
+ 0.5)
+ HasBranchWeights = false;
+
IRBuilder<> IRB(&BB);
IRB.SetCurrentDebugLocation(DebugLoc::getCompilerGenerated());
@@ -287,8 +323,14 @@ static void buildPartialUnswitchConditionalBranch(
Value *Cond = Direction ? IRB.CreateOr(FrozenInvariants)
: IRB.CreateAnd(FrozenInvariants);
- IRB.CreateCondBr(Cond, Direction ? &UnswitchedSucc : &NormalSucc,
- Direction ? &NormalSucc : &UnswitchedSucc);
+ auto *BR = IRB.CreateCondBr(
+ Cond, Direction ? &UnswitchedSucc : &NormalSucc,
+ Direction ? &NormalSucc : &UnswitchedSucc,
+ HasBranchWeights ? ComputeProfFrom.getMetadata(LLVMContext::MD_prof)
+ : nullptr);
+ if (!HasBranchWeights)
+ setExplicitlyUnknownBranchWeightsIfProfiled(
+ *BR, *BR->getParent()->getParent(), DEBUG_TYPE);
}
/// Copy a set of loop invariant values, and conditionally branch on them.
@@ -658,7 +700,7 @@ static bool unswitchTrivialBranch(Loop &L, BranchInst &BI, DominatorTree &DT,
" condition!");
buildPartialUnswitchConditionalBranch(
*OldPH, Invariants, ExitDirection, *UnswitchedBB, *NewPH,
- FreezeLoopUnswitchCond, OldPH->getTerminator(), nullptr, DT);
+ FreezeLoopUnswitchCond, OldPH->getTerminator(), nullptr, DT, BI);
}
// Update the dominator tree with the added edge.
@@ -2477,7 +2519,7 @@ static void unswitchNontrivialInvariants(
else {
buildPartialUnswitchConditionalBranch(
*SplitBB, Invariants, Direction, *ClonedPH, *LoopPH,
- FreezeLoopUnswitchCond, BI, &AC, DT);
+ FreezeLoopUnswitchCond, BI, &AC, DT, *BI);
}
DTUpdates.push_back({DominatorTree::Insert, SplitBB, ClonedPH});
diff --git a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index 9829d4d..11db0ec 100644
--- a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -674,6 +674,79 @@ BasicBlock *llvm::SplitEdge(BasicBlock *BB, BasicBlock *Succ, DominatorTree *DT,
return SplitBlock(BB, BB->getTerminator(), DT, LI, MSSAU, BBName);
}
+/// Helper function to update the cycle or loop information after inserting a
+/// new block between a callbr instruction and one of its target blocks. Adds
+/// the new block to the innermost cycle or loop that the callbr instruction and
+/// the original target block share.
+/// \p LCI cycle or loop information to update
+/// \p CallBrBlock block containing the callbr instruction
+/// \p CallBrTarget new target block of the callbr instruction
+/// \p Succ original target block of the callbr instruction
+template <typename TI, typename T>
+static bool updateCycleLoopInfo(TI *LCI, BasicBlock *CallBrBlock,
+ BasicBlock *CallBrTarget, BasicBlock *Succ) {
+ static_assert(std::is_same_v<TI, CycleInfo> || std::is_same_v<TI, LoopInfo>,
+ "type must be CycleInfo or LoopInfo");
+ if (!LCI)
+ return false;
+
+ T *LC;
+ if constexpr (std::is_same_v<TI, CycleInfo>)
+ LC = LCI->getSmallestCommonCycle(CallBrBlock, Succ);
+ else
+ LC = LCI->getSmallestCommonLoop(CallBrBlock, Succ);
+ if (!LC)
+ return false;
+
+ if constexpr (std::is_same_v<TI, CycleInfo>)
+ LCI->addBlockToCycle(CallBrTarget, LC);
+ else
+ LC->addBasicBlockToLoop(CallBrTarget, *LCI);
+
+ return true;
+}
+
+BasicBlock *llvm::SplitCallBrEdge(BasicBlock *CallBrBlock, BasicBlock *Succ,
+ unsigned SuccIdx, DomTreeUpdater *DTU,
+ CycleInfo *CI, LoopInfo *LI,
+ bool *UpdatedLI) {
+ CallBrInst *CallBr = dyn_cast<CallBrInst>(CallBrBlock->getTerminator());
+ assert(CallBr && "expected callbr terminator");
+ assert(SuccIdx < CallBr->getNumSuccessors() &&
+ Succ == CallBr->getSuccessor(SuccIdx) && "invalid successor index");
+
+ // Create a new block between callbr and the specified successor.
+ // splitBlockBefore cannot be re-used here since it cannot split if the split
+ // point is a PHI node (because BasicBlock::splitBasicBlockBefore cannot
+ // handle that). But we don't need to rewire every part of a potential PHI
+ // node. We only care about the edge between CallBrBlock and the original
+ // successor.
+ BasicBlock *CallBrTarget =
+ BasicBlock::Create(CallBrBlock->getContext(),
+ CallBrBlock->getName() + ".target." + Succ->getName(),
+ CallBrBlock->getParent());
+ // Rewire control flow from the new target block to the original successor.
+ Succ->replacePhiUsesWith(CallBrBlock, CallBrTarget);
+ // Rewire control flow from callbr to the new target block.
+ CallBr->setSuccessor(SuccIdx, CallBrTarget);
+ // Jump from the new target block to the original successor.
+ BranchInst::Create(Succ, CallBrTarget);
+
+ bool Updated =
+ updateCycleLoopInfo<LoopInfo, Loop>(LI, CallBrBlock, CallBrTarget, Succ);
+ if (UpdatedLI)
+ *UpdatedLI = Updated;
+ updateCycleLoopInfo<CycleInfo, Cycle>(CI, CallBrBlock, CallBrTarget, Succ);
+ if (DTU) {
+ DTU->applyUpdates({{DominatorTree::Insert, CallBrBlock, CallBrTarget}});
+ if (DTU->getDomTree().dominates(CallBrBlock, Succ))
+ DTU->applyUpdates({{DominatorTree::Delete, CallBrBlock, Succ},
+ {DominatorTree::Insert, CallBrTarget, Succ}});
+ }
+
+ return CallBrTarget;
+}
+
void llvm::setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ) {
if (auto *II = dyn_cast<InvokeInst>(TI))
II->setUnwindDest(Succ);
diff --git a/llvm/lib/Transforms/Utils/ControlFlowUtils.cpp b/llvm/lib/Transforms/Utils/ControlFlowUtils.cpp
index 0046a00..287a177 100644
--- a/llvm/lib/Transforms/Utils/ControlFlowUtils.cpp
+++ b/llvm/lib/Transforms/Utils/ControlFlowUtils.cpp
@@ -13,6 +13,7 @@
#include "llvm/Transforms/Utils/ControlFlowUtils.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/DomTreeUpdater.h"
+#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ValueHandle.h"
@@ -281,7 +282,9 @@ std::pair<BasicBlock *, bool> ControlFlowHub::finalize(
for (auto [BB, Succ0, Succ1] : Branches) {
#ifndef NDEBUG
- assert(Incoming.insert(BB).second && "Duplicate entry for incoming block.");
+ assert(
+ (Incoming.insert(BB).second || isa<CallBrInst>(BB->getTerminator())) &&
+ "Duplicate entry for incoming block.");
#endif
if (Succ0)
Outgoing.insert(Succ0);
diff --git a/llvm/lib/Transforms/Utils/FixIrreducible.cpp b/llvm/lib/Transforms/Utils/FixIrreducible.cpp
index 45e1d12..804af22 100644
--- a/llvm/lib/Transforms/Utils/FixIrreducible.cpp
+++ b/llvm/lib/Transforms/Utils/FixIrreducible.cpp
@@ -79,6 +79,53 @@
// Limitation: The pass cannot handle switch statements and indirect
// branches. Both must be lowered to plain branches first.
//
+// CallBr support: CallBr is handled as a more general branch instruction which
+// can have multiple successors. The pass redirects the edges to intermediate
+// target blocks that unconditionally branch to the original callbr target
+// blocks. This allows the control flow hub to know to which of the original
+// target blocks to jump to.
+// Example input CFG:
+// Entry (callbr)
+// / \
+// v v
+// H ----> B
+// ^ /|
+// `----' |
+// v
+// Exit
+//
+// becomes:
+// Entry (callbr)
+// / \
+// v v
+// target.H target.B
+// | |
+// v v
+// H ----> B
+// ^ /|
+// `----' |
+// v
+// Exit
+//
+// Note
+// OUTPUT CFG: Converted to a natural loop with a new header N.
+//
+// Entry (callbr)
+// / \
+// v v
+// target.H target.B
+// \ /
+// \ /
+// v v
+// N <---.
+// / \ \
+// / \ |
+// v v /
+// H --> B --'
+// |
+// v
+// Exit
+//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/FixIrreducible.h"
@@ -231,6 +278,7 @@ static bool fixIrreducible(Cycle &C, CycleInfo &CI, DominatorTree &DT,
return false;
LLVM_DEBUG(dbgs() << "Processing cycle:\n" << CI.print(&C) << "\n";);
+ DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
ControlFlowHub CHub;
SetVector<BasicBlock *> Predecessors;
@@ -242,18 +290,32 @@ static bool fixIrreducible(Cycle &C, CycleInfo &CI, DominatorTree &DT,
}
for (BasicBlock *P : Predecessors) {
- auto *Branch = cast<BranchInst>(P->getTerminator());
- // Exactly one of the two successors is the header.
- BasicBlock *Succ0 = Branch->getSuccessor(0) == Header ? Header : nullptr;
- BasicBlock *Succ1 = Succ0 ? nullptr : Header;
- if (!Succ0)
- assert(Branch->getSuccessor(1) == Header);
- assert(Succ0 || Succ1);
- CHub.addBranch(P, Succ0, Succ1);
-
- LLVM_DEBUG(dbgs() << "Added internal branch: " << P->getName() << " -> "
- << (Succ0 ? Succ0->getName() : "") << " "
- << (Succ1 ? Succ1->getName() : "") << "\n");
+ if (BranchInst *Branch = dyn_cast<BranchInst>(P->getTerminator())) {
+ // Exactly one of the two successors is the header.
+ BasicBlock *Succ0 = Branch->getSuccessor(0) == Header ? Header : nullptr;
+ BasicBlock *Succ1 = Succ0 ? nullptr : Header;
+ assert(Succ0 || Branch->getSuccessor(1) == Header);
+ assert(Succ0 || Succ1);
+ CHub.addBranch(P, Succ0, Succ1);
+
+ LLVM_DEBUG(dbgs() << "Added internal branch: " << printBasicBlock(P)
+ << " -> " << printBasicBlock(Succ0)
+ << (Succ0 && Succ1 ? " " : "") << printBasicBlock(Succ1)
+ << '\n');
+ } else if (CallBrInst *CallBr = dyn_cast<CallBrInst>(P->getTerminator())) {
+ for (unsigned I = 0; I < CallBr->getNumSuccessors(); ++I) {
+ BasicBlock *Succ = CallBr->getSuccessor(I);
+ if (Succ != Header)
+ continue;
+ BasicBlock *NewSucc = SplitCallBrEdge(P, Succ, I, &DTU, &CI, LI);
+ CHub.addBranch(NewSucc, Succ);
+ LLVM_DEBUG(dbgs() << "Added internal branch: "
+ << printBasicBlock(NewSucc) << " -> "
+ << printBasicBlock(Succ) << '\n');
+ }
+ } else {
+ llvm_unreachable("unsupported block terminator");
+ }
}
// Redirect external incoming edges. This includes the edges on the header.
@@ -266,17 +328,32 @@ static bool fixIrreducible(Cycle &C, CycleInfo &CI, DominatorTree &DT,
}
for (BasicBlock *P : Predecessors) {
- auto *Branch = cast<BranchInst>(P->getTerminator());
- BasicBlock *Succ0 = Branch->getSuccessor(0);
- Succ0 = C.contains(Succ0) ? Succ0 : nullptr;
- BasicBlock *Succ1 =
- Branch->isUnconditional() ? nullptr : Branch->getSuccessor(1);
- Succ1 = Succ1 && C.contains(Succ1) ? Succ1 : nullptr;
- CHub.addBranch(P, Succ0, Succ1);
-
- LLVM_DEBUG(dbgs() << "Added external branch: " << P->getName() << " -> "
- << (Succ0 ? Succ0->getName() : "") << " "
- << (Succ1 ? Succ1->getName() : "") << "\n");
+ if (BranchInst *Branch = dyn_cast<BranchInst>(P->getTerminator()); Branch) {
+ BasicBlock *Succ0 = Branch->getSuccessor(0);
+ Succ0 = C.contains(Succ0) ? Succ0 : nullptr;
+ BasicBlock *Succ1 =
+ Branch->isUnconditional() ? nullptr : Branch->getSuccessor(1);
+ Succ1 = Succ1 && C.contains(Succ1) ? Succ1 : nullptr;
+ CHub.addBranch(P, Succ0, Succ1);
+
+ LLVM_DEBUG(dbgs() << "Added external branch: " << printBasicBlock(P)
+ << " -> " << printBasicBlock(Succ0)
+ << (Succ0 && Succ1 ? " " : "") << printBasicBlock(Succ1)
+ << '\n');
+ } else if (CallBrInst *CallBr = dyn_cast<CallBrInst>(P->getTerminator())) {
+ for (unsigned I = 0; I < CallBr->getNumSuccessors(); ++I) {
+ BasicBlock *Succ = CallBr->getSuccessor(I);
+ if (!C.contains(Succ))
+ continue;
+ BasicBlock *NewSucc = SplitCallBrEdge(P, Succ, I, &DTU, &CI, LI);
+ CHub.addBranch(NewSucc, Succ);
+ LLVM_DEBUG(dbgs() << "Added external branch: "
+ << printBasicBlock(NewSucc) << " -> "
+ << printBasicBlock(Succ) << '\n');
+ }
+ } else {
+ llvm_unreachable("unsupported block terminator");
+ }
}
// Redirect all the backedges through a "hub" consisting of a series
@@ -292,7 +369,6 @@ static bool fixIrreducible(Cycle &C, CycleInfo &CI, DominatorTree &DT,
SetVector<BasicBlock *> Entries;
Entries.insert(C.entry_rbegin(), C.entry_rend());
- DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
CHub.finalize(&DTU, GuardBlocks, "irr");
#if defined(EXPENSIVE_CHECKS)
assert(DT.verify(DominatorTree::VerificationLevel::Full));
@@ -325,8 +401,6 @@ static bool FixIrreducibleImpl(Function &F, CycleInfo &CI, DominatorTree &DT,
LLVM_DEBUG(dbgs() << "===== Fix irreducible control-flow in function: "
<< F.getName() << "\n");
- assert(hasOnlySimpleTerminator(F) && "Unsupported block terminator.");
-
bool Changed = false;
for (Cycle *TopCycle : CI.toplevel_cycles()) {
for (Cycle *C : depth_first(TopCycle)) {
diff --git a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index 6312831..7a2b8da 100644
--- a/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -460,25 +460,10 @@ CloneLoopBlocks(Loop *L, Value *NewIter, const bool UseEpilogRemainder,
Loop *NewLoop = NewLoops[L];
assert(NewLoop && "L should have been cloned");
- MDNode *LoopID = NewLoop->getLoopID();
-
- // Only add loop metadata if the loop is not going to be completely
- // unrolled.
- if (UnrollRemainder)
- return NewLoop;
-
- std::optional<MDNode *> NewLoopID = makeFollowupLoopID(
- LoopID, {LLVMLoopUnrollFollowupAll, LLVMLoopUnrollFollowupRemainder});
- if (NewLoopID) {
- NewLoop->setLoopID(*NewLoopID);
-
- // Do not setLoopAlreadyUnrolled if loop attributes have been defined
- // explicitly.
- return NewLoop;
- }
// Add unroll disable metadata to disable future unrolling for this loop.
- NewLoop->setLoopAlreadyUnrolled();
+ if (!UnrollRemainder)
+ NewLoop->setLoopAlreadyUnrolled();
return NewLoop;
}
diff --git a/llvm/lib/Transforms/Utils/UnifyLoopExits.cpp b/llvm/lib/Transforms/Utils/UnifyLoopExits.cpp
index 9f338db..94c5c170 100644
--- a/llvm/lib/Transforms/Utils/UnifyLoopExits.cpp
+++ b/llvm/lib/Transforms/Utils/UnifyLoopExits.cpp
@@ -12,7 +12,11 @@
//
// Limitation: This assumes that all terminators in the CFG are direct branches
// (the "br" instruction). The presence of any other control flow
-// such as indirectbr, switch or callbr will cause an assert.
+// such as indirectbr or switch will cause an assert.
+// The callbr terminator is supported by creating intermediate
+// target blocks that unconditionally branch to the original target
+// blocks. These intermediate target blocks can then be redirected
+// through the ControlFlowHub as usual.
//
//===----------------------------------------------------------------------===//
@@ -150,25 +154,55 @@ static bool unifyLoopExits(DominatorTree &DT, LoopInfo &LI, Loop *L) {
SmallVector<BasicBlock *, 8> ExitingBlocks;
L->getExitingBlocks(ExitingBlocks);
+ DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
+ SmallVector<BasicBlock *, 8> CallBrTargetBlocksToFix;
// Redirect exiting edges through a control flow hub.
ControlFlowHub CHub;
- for (auto *BB : ExitingBlocks) {
- auto *Branch = cast<BranchInst>(BB->getTerminator());
- BasicBlock *Succ0 = Branch->getSuccessor(0);
- Succ0 = L->contains(Succ0) ? nullptr : Succ0;
-
- BasicBlock *Succ1 =
- Branch->isUnconditional() ? nullptr : Branch->getSuccessor(1);
- Succ1 = L->contains(Succ1) ? nullptr : Succ1;
- CHub.addBranch(BB, Succ0, Succ1);
-
- LLVM_DEBUG(dbgs() << "Added exiting branch: " << BB->getName() << " -> {"
- << (Succ0 ? Succ0->getName() : "<none>") << ", "
- << (Succ1 ? Succ1->getName() : "<none>") << "}\n");
+
+ for (unsigned I = 0; I < ExitingBlocks.size(); ++I) {
+ BasicBlock *BB = ExitingBlocks[I];
+ if (BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator())) {
+ BasicBlock *Succ0 = Branch->getSuccessor(0);
+ Succ0 = L->contains(Succ0) ? nullptr : Succ0;
+
+ BasicBlock *Succ1 =
+ Branch->isUnconditional() ? nullptr : Branch->getSuccessor(1);
+ Succ1 = L->contains(Succ1) ? nullptr : Succ1;
+ CHub.addBranch(BB, Succ0, Succ1);
+
+      LLVM_DEBUG(dbgs() << "Added exiting branch: " << printBasicBlock(BB)
+ << " -> " << printBasicBlock(Succ0)
+ << (Succ0 && Succ1 ? " " : "") << printBasicBlock(Succ1)
+ << '\n');
+ } else if (CallBrInst *CallBr = dyn_cast<CallBrInst>(BB->getTerminator())) {
+ for (unsigned J = 0; J < CallBr->getNumSuccessors(); ++J) {
+ BasicBlock *Succ = CallBr->getSuccessor(J);
+ if (L->contains(Succ))
+ continue;
+ bool UpdatedLI = false;
+ BasicBlock *NewSucc =
+ SplitCallBrEdge(BB, Succ, J, &DTU, nullptr, &LI, &UpdatedLI);
+ // Even if CallBr and Succ do not have a common parent loop, we need to
+ // add the new target block to the parent loop of the current loop.
+ if (!UpdatedLI)
+ CallBrTargetBlocksToFix.push_back(NewSucc);
+ // ExitingBlocks is later used to restore SSA, so we need to make sure
+ // that the blocks used for phi nodes in the guard blocks match the
+ // predecessors of the guard blocks, which, in the case of callbr, are
+ // the new intermediate target blocks instead of the callbr blocks
+ // themselves.
+ ExitingBlocks[I] = NewSucc;
+ CHub.addBranch(NewSucc, Succ);
+ LLVM_DEBUG(dbgs() << "Added exiting branch: "
+ << printBasicBlock(NewSucc) << " -> "
+ << printBasicBlock(Succ) << '\n');
+ }
+ } else {
+ llvm_unreachable("unsupported block terminator");
+ }
}
SmallVector<BasicBlock *, 8> GuardBlocks;
- DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
BasicBlock *LoopExitBlock;
bool ChangedCFG;
std::tie(LoopExitBlock, ChangedCFG) = CHub.finalize(
@@ -187,10 +221,19 @@ static bool unifyLoopExits(DominatorTree &DT, LoopInfo &LI, Loop *L) {
// The guard blocks were created outside the loop, so they need to become
// members of the parent loop.
- if (auto ParentLoop = L->getParentLoop()) {
+ // Same goes for the callbr target blocks. Although we try to add them to the
+ // smallest common parent loop of the callbr block and the corresponding
+ // original target block, there might not have been such a loop, in which case
+ // the newly created callbr target blocks are not part of any loop. For nested
+ // loops, this might result in them leading to a loop with multiple entry
+ // points.
+ if (auto *ParentLoop = L->getParentLoop()) {
for (auto *G : GuardBlocks) {
ParentLoop->addBasicBlockToLoop(G, LI);
}
+ for (auto *C : CallBrTargetBlocksToFix) {
+ ParentLoop->addBasicBlockToLoop(C, LI);
+ }
ParentLoop->verifyLoop();
}
@@ -218,8 +261,6 @@ bool UnifyLoopExitsLegacyPass::runOnFunction(Function &F) {
auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- assert(hasOnlySimpleTerminator(F) && "Unsupported block terminator.");
-
return runImpl(LI, DT);
}
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index f7968ab..505fb43 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -5750,13 +5750,18 @@ void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
getMemoryInstructionCost(I, ElementCount::getFixed(1))));
UpdateMemOpUserCost(cast<LoadInst>(I));
} else if (const auto *Group = getInterleavedAccessGroup(I)) {
- // Scalarize an interleave group of address loads.
- for (unsigned I = 0; I < Group->getFactor(); ++I) {
- if (Instruction *Member = Group->getMember(I)) {
- setWideningDecision(
- Member, VF, CM_Scalarize,
- (VF.getKnownMinValue() *
- getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
+ // Scalarize all members of this interleaved group when any member
+ // is used as an address. The address-used load skips scalarization
+ // overhead, other members include it.
+ for (unsigned Idx = 0; Idx < Group->getFactor(); ++Idx) {
+ if (Instruction *Member = Group->getMember(Idx)) {
+ InstructionCost Cost =
+ AddrDefs.contains(Member)
+ ? (VF.getKnownMinValue() *
+ getMemoryInstructionCost(Member,
+ ElementCount::getFixed(1)))
+ : getMemInstScalarizationCost(Member, VF);
+ setWideningDecision(Member, VF, CM_Scalarize, Cost);
UpdateMemOpUserCost(cast<LoadInst>(Member));
}
}
@@ -8335,11 +8340,7 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
&R) ||
(isa<VPInstruction>(&R) && !UnderlyingValue))
continue;
-
- // FIXME: VPlan0, which models a copy of the original scalar loop, should
- // not use VPWidenPHIRecipe to model the phis.
- assert((isa<VPWidenPHIRecipe>(&R) || isa<VPInstruction>(&R)) &&
- UnderlyingValue && "unsupported recipe");
+ assert(isa<VPInstruction>(&R) && UnderlyingValue && "unsupported recipe");
// TODO: Gradually replace uses of underlying instruction by analyses on
// VPlan.
diff --git a/llvm/test/Analysis/DependenceAnalysis/GCD.ll b/llvm/test/Analysis/DependenceAnalysis/GCD.ll
index 03343e7..cb14d18 100644
--- a/llvm/test/Analysis/DependenceAnalysis/GCD.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/GCD.ll
@@ -254,7 +254,7 @@ define void @gcd4(ptr %A, ptr %B, i64 %M, i64 %N) nounwind uwtable ssp {
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %conv, ptr %arrayidx, align 4
; CHECK-NEXT: da analyze - output [* *]!
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: %0 = load i32, ptr %arrayidx16, align 4
-; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: da analyze - flow [* *|<]!
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %0, ptr %B.addr.11, align 4
; CHECK-NEXT: da analyze - confused!
; CHECK-NEXT: Src: %0 = load i32, ptr %arrayidx16, align 4 --> Dst: %0 = load i32, ptr %arrayidx16, align 4
@@ -322,7 +322,7 @@ define void @gcd5(ptr %A, ptr %B, i64 %M, i64 %N) nounwind uwtable ssp {
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %conv, ptr %arrayidx, align 4
; CHECK-NEXT: da analyze - output [* *]!
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: %0 = load i32, ptr %arrayidx16, align 4
-; CHECK-NEXT: da analyze - flow [<> *]!
+; CHECK-NEXT: da analyze - flow [* *|<]!
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %0, ptr %B.addr.11, align 4
; CHECK-NEXT: da analyze - confused!
; CHECK-NEXT: Src: %0 = load i32, ptr %arrayidx16, align 4 --> Dst: %0 = load i32, ptr %arrayidx16, align 4
@@ -390,7 +390,7 @@ define void @gcd6(i64 %n, ptr %A, ptr %B) nounwind uwtable ssp {
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx5, align 4 --> Dst: store i32 %conv, ptr %arrayidx5, align 4
; CHECK-NEXT: da analyze - output [* *]!
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx5, align 4 --> Dst: %2 = load i32, ptr %arrayidx9, align 4
-; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: da analyze - flow [* *|<]!
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx5, align 4 --> Dst: store i32 %2, ptr %B.addr.12, align 4
; CHECK-NEXT: da analyze - confused!
; CHECK-NEXT: Src: %2 = load i32, ptr %arrayidx9, align 4 --> Dst: %2 = load i32, ptr %arrayidx9, align 4
diff --git a/llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll b/llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll
index cdfaec7..73a415b 100644
--- a/llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/SymbolicSIV.ll
@@ -384,7 +384,7 @@ define void @symbolicsiv6(ptr %A, ptr %B, i64 %n, i64 %N, i64 %M) nounwind uwtab
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %conv, ptr %arrayidx, align 4
; CHECK-NEXT: da analyze - none!
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: %0 = load i32, ptr %arrayidx7, align 4
-; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: da analyze - flow [*|<]!
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %0, ptr %B.addr.02, align 4
; CHECK-NEXT: da analyze - confused!
; CHECK-NEXT: Src: %0 = load i32, ptr %arrayidx7, align 4 --> Dst: %0 = load i32, ptr %arrayidx7, align 4
@@ -440,7 +440,7 @@ define void @symbolicsiv7(ptr %A, ptr %B, i64 %n, i64 %N, i64 %M) nounwind uwtab
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %conv, ptr %arrayidx, align 4
; CHECK-NEXT: da analyze - none!
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: %1 = load i32, ptr %arrayidx6, align 4
-; CHECK-NEXT: da analyze - flow [<>]!
+; CHECK-NEXT: da analyze - flow [*|<]!
; CHECK-NEXT: Src: store i32 %conv, ptr %arrayidx, align 4 --> Dst: store i32 %1, ptr %B.addr.02, align 4
; CHECK-NEXT: da analyze - confused!
; CHECK-NEXT: Src: %1 = load i32, ptr %arrayidx6, align 4 --> Dst: %1 = load i32, ptr %arrayidx6, align 4
diff --git a/llvm/test/Analysis/DependenceAnalysis/compute-absolute-value.ll b/llvm/test/Analysis/DependenceAnalysis/compute-absolute-value.ll
index 64fad37..783150a 100644
--- a/llvm/test/Analysis/DependenceAnalysis/compute-absolute-value.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/compute-absolute-value.ll
@@ -18,7 +18,7 @@ define void @unknown_sign(ptr %a, i64 %k) {
; CHECK-NEXT: Src: store i8 1, ptr %idx.0, align 1 --> Dst: store i8 1, ptr %idx.0, align 1
; CHECK-NEXT: da analyze - none!
; CHECK-NEXT: Src: store i8 1, ptr %idx.0, align 1 --> Dst: store i8 2, ptr %idx.1, align 1
-; CHECK-NEXT: da analyze - output [<>]!
+; CHECK-NEXT: da analyze - output [*|<]!
; CHECK-NEXT: Src: store i8 2, ptr %idx.1, align 1 --> Dst: store i8 2, ptr %idx.1, align 1
; CHECK-NEXT: da analyze - none!
;
diff --git a/llvm/test/Analysis/DependenceAnalysis/gcd-miv-overflow.ll b/llvm/test/Analysis/DependenceAnalysis/gcd-miv-overflow.ll
new file mode 100644
index 0000000..9169ac3
--- /dev/null
+++ b/llvm/test/Analysis/DependenceAnalysis/gcd-miv-overflow.ll
@@ -0,0 +1,63 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -disable-output "-passes=print<da>" 2>&1 \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-ALL
+; RUN: opt < %s -disable-output "-passes=print<da>" -da-enable-dependence-test=gcd-miv 2>&1 \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-GCD-MIV
+
+; offset0 = 4;
+; offset1 = 0;
+; for (i = 0; i < 100; i++) {
+; A[offset0] = 1;
+; A[offset1] = 2;
+; offset0 += 3*m;
+; offset1 += 3;
+; }
+;
+; Dependency exists between the two stores. E.g., consider `m` is
+; 12297829382473034411, which is a modular multiplicative inverse of 3 under
+; modulo 2^64. Then `offset0` is effectively `i + 4`, so accesses will be as
+; follows:
+;
+; - A[offset0] : A[4], A[5], A[6], ...
+; - A[offset1] : A[0], A[3], A[6], ...
+;
+define void @gcdmiv_coef_ovfl(ptr %A, i64 %m) {
+; CHECK-ALL-LABEL: 'gcdmiv_coef_ovfl'
+; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-ALL-NEXT: da analyze - output [*|<]!
+; CHECK-ALL-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+;
+; CHECK-GCD-MIV-LABEL: 'gcdmiv_coef_ovfl'
+; CHECK-GCD-MIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1
+; CHECK-GCD-MIV-NEXT: da analyze - consistent output [*]!
+; CHECK-GCD-MIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-GCD-MIV-NEXT: da analyze - consistent output [*|<]!
+; CHECK-GCD-MIV-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-GCD-MIV-NEXT: da analyze - consistent output [*]!
+;
+entry:
+ %step = mul i64 3, %m
+ br label %loop
+
+loop:
+ %i = phi i64 [ 0, %entry ], [ %i.inc, %loop ]
+ %offset.0 = phi i64 [ 4, %entry ] , [ %offset.0.next, %loop ]
+ %offset.1 = phi i64 [ 0, %entry ] , [ %offset.1.next, %loop ]
+ %gep.0 = getelementptr inbounds i8, ptr %A, i64 %offset.0
+ %gep.1 = getelementptr inbounds i8, ptr %A, i64 %offset.1
+ store i8 1, ptr %gep.0
+ store i8 2, ptr %gep.1
+ %i.inc = add nuw nsw i64 %i, 1
+ %offset.0.next = add nsw i64 %offset.0, %step
+ %offset.1.next = add nsw i64 %offset.1, 3
+ %ec = icmp eq i64 %i.inc, 100
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Analysis/DependenceAnalysis/strong-siv-overflow.ll b/llvm/test/Analysis/DependenceAnalysis/strong-siv-overflow.ll
new file mode 100644
index 0000000..bf0fafc
--- /dev/null
+++ b/llvm/test/Analysis/DependenceAnalysis/strong-siv-overflow.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -disable-output "-passes=print<da>" 2>&1 \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-ALL
+; RUN: opt < %s -disable-output "-passes=print<da>" -da-enable-dependence-test=strong-siv 2>&1 \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-STRONG-SIV
+
+; for (i = 0; i < (1LL << 62); i++) {
+; if (0 <= 2*i - 2)
+; A[2*i - 2] = 1;
+;
+; if (0 <= 2*i - 4)
+; A[2*i - 4] = 2;
+; }
+;
+; FIXME: DependenceAnalysis currently detects no dependency between the two
+; stores, but it does exist. For example, each store will access A[0] when i
+; is 1 and 2 respectively.
+; The root cause is that the product of the BTC and the coefficient
+; ((1LL << 62) - 1 and 2) overflows in a signed sense.
+define void @strongsiv_const_ovfl(ptr %A) {
+; CHECK-LABEL: 'strongsiv_const_ovfl'
+; CHECK-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1
+; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-NEXT: da analyze - none!
+; CHECK-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-NEXT: da analyze - none!
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %i = phi i64 [ 0, %entry ], [ %i.inc, %loop.latch ]
+ %offset.0 = phi i64 [ -2, %entry ], [ %offset.0.next, %loop.latch ]
+ %offset.1 = phi i64 [ -4, %entry ], [ %offset.1.next, %loop.latch ]
+ %ec = icmp eq i64 %i, 4611686018427387904
+ br i1 %ec, label %exit, label %loop.body
+
+loop.body:
+ %cond.0 = icmp sge i64 %offset.0, 0
+ %cond.1 = icmp sge i64 %offset.1, 0
+ br i1 %cond.0, label %if.then.0, label %loop.middle
+
+if.then.0:
+ %gep.0 = getelementptr inbounds i8, ptr %A, i64 %offset.0
+ store i8 1, ptr %gep.0
+ br label %loop.middle
+
+loop.middle:
+ br i1 %cond.1, label %if.then.1, label %loop.latch
+
+if.then.1:
+ %gep.1 = getelementptr inbounds i8, ptr %A, i64 %offset.1
+ store i8 2, ptr %gep.1
+ br label %loop.latch
+
+loop.latch:
+ %i.inc = add nuw nsw i64 %i, 1
+ %offset.0.next = add nsw i64 %offset.0, 2
+ %offset.1.next = add nsw i64 %offset.1, 2
+ br label %loop.header
+
+exit:
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-ALL: {{.*}}
+; CHECK-STRONG-SIV: {{.*}}
diff --git a/llvm/test/Analysis/DependenceAnalysis/symbolic-rdiv-overflow.ll b/llvm/test/Analysis/DependenceAnalysis/symbolic-rdiv-overflow.ll
new file mode 100644
index 0000000..c5ff988
--- /dev/null
+++ b/llvm/test/Analysis/DependenceAnalysis/symbolic-rdiv-overflow.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -disable-output "-passes=print<da>" 2>&1 \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-ALL
+; RUN: opt < %s -disable-output "-passes=print<da>" -da-enable-dependence-test=symbolic-rdiv 2>&1 \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-SYMBOLIC-RDIV
+
+; for (i = 0; i < (1LL << 62); i++) {
+; if (0 <= 2*i - 2)
+; A[2*i - 2] = 1;
+; A[i] = 2;
+; }
+;
+; FIXME: DependenceAnalysis currently detects no dependency between the two
+; stores, but it does exist. For example, each store will access A[0] when i
+; is 1 and 0 respectively.
+; The root cause is that the product of the BTC and the coefficient
+; ((1LL << 62) - 1 and 2) overflows in a signed sense.
+define void @symbolicrdiv_prod_ovfl(ptr %A) {
+; CHECK-ALL-LABEL: 'symbolicrdiv_prod_ovfl'
+; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+;
+; CHECK-SYMBOLIC-RDIV-LABEL: 'symbolicrdiv_prod_ovfl'
+; CHECK-SYMBOLIC-RDIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1
+; CHECK-SYMBOLIC-RDIV-NEXT: da analyze - none!
+; CHECK-SYMBOLIC-RDIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-SYMBOLIC-RDIV-NEXT: da analyze - none!
+; CHECK-SYMBOLIC-RDIV-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-SYMBOLIC-RDIV-NEXT: da analyze - consistent output [*]!
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %i = phi i64 [ 0, %entry ], [ %i.inc, %loop.latch ]
+ %offset = phi i64 [ -2, %entry ], [ %offset.next, %loop.latch ]
+ %ec = icmp eq i64 %i, 4611686018427387904
+ br i1 %ec, label %exit, label %loop.body
+
+loop.body:
+ %cond = icmp sge i64 %offset, 0
+ br i1 %cond, label %if.then, label %loop.latch
+
+if.then:
+ %gep.0 = getelementptr inbounds i8, ptr %A, i64 %offset
+ store i8 1, ptr %gep.0
+ br label %loop.latch
+
+loop.latch:
+ %gep.1 = getelementptr inbounds i8, ptr %A, i64 %i
+ store i8 2, ptr %gep.1
+ %i.inc = add nuw nsw i64 %i, 1
+ %offset.next = add nsw i64 %offset, 2
+ br label %loop.header
+
+exit:
+ ret void
+}
+
+; offset0 = -4611686018427387904; // -2^62
+; offset1 = 4611686018427387904; // 2^62
+; for (i = 0; i < (1LL << 62) - 100; i++) {
+; if (0 <= offset0)
+; A[offset0] = 1;
+; if (0 <= offset1)
+; A[offset1] = 2;
+; offset0 += 2;
+; offset1 -= 1;
+; }
+;
+; FIXME: DependenceAnalysis currently detects no dependency between the two
+; stores, but it does exist. For example,
+;
+; memory access | i == 2^61 | i == 2^61 + 2^59 | i == 2^61 + 2^60
+; -------------------------|-----------|------------------|-------------------
+; A[2*i - 2^62] (offset0) | | A[2^60] | A[2^61]
+; A[-i + 2^62] (offset1) | A[2^61] | | A[2^60]
+;
+; The root cause is that the calculation of the difference between the two
+; constants (-2^62 and 2^62) overflows in a signed sense.
+define void @symbolicrdiv_delta_ovfl(ptr %A) {
+; CHECK-ALL-LABEL: 'symbolicrdiv_delta_ovfl'
+; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+;
+; CHECK-SYMBOLIC-RDIV-LABEL: 'symbolicrdiv_delta_ovfl'
+; CHECK-SYMBOLIC-RDIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1
+; CHECK-SYMBOLIC-RDIV-NEXT: da analyze - consistent output [*]!
+; CHECK-SYMBOLIC-RDIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-SYMBOLIC-RDIV-NEXT: da analyze - none!
+; CHECK-SYMBOLIC-RDIV-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-SYMBOLIC-RDIV-NEXT: da analyze - consistent output [*]!
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %i = phi i64 [ 0, %entry ], [ %i.inc, %loop.latch ]
+ %offset.0 = phi i64 [ -4611686018427387904, %entry ], [ %offset.0.next, %loop.latch ]
+ %offset.1 = phi i64 [ 4611686018427387904, %entry ], [ %offset.1.next, %loop.latch ]
+ %cond.0 = icmp sge i64 %offset.0, 0
+ %cond.1 = icmp sge i64 %offset.1, 0
+ br i1 %cond.0, label %if.then.0, label %loop.middle
+
+if.then.0:
+ %gep.0 = getelementptr inbounds i8, ptr %A, i64 %offset.0
+ store i8 1, ptr %gep.0
+ br label %loop.middle
+
+loop.middle:
+ br i1 %cond.1, label %if.then.1, label %loop.latch
+
+if.then.1:
+ %gep.1 = getelementptr inbounds i8, ptr %A, i64 %offset.1
+ store i8 2, ptr %gep.1
+ br label %loop.latch
+
+loop.latch:
+ %i.inc = add nuw nsw i64 %i, 1
+ %offset.0.next = add nsw i64 %offset.0, 2
+ %offset.1.next = sub nsw i64 %offset.1, 1
+ %ec = icmp eq i64 %i.inc, 4611686018427387804 ; 2^62 - 100
+ br i1 %ec, label %exit, label %loop.header
+
+exit:
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Analysis/DependenceAnalysis/weak-crossing-siv-overflow.ll b/llvm/test/Analysis/DependenceAnalysis/weak-crossing-siv-overflow.ll
new file mode 100644
index 0000000..ba57c7b
--- /dev/null
+++ b/llvm/test/Analysis/DependenceAnalysis/weak-crossing-siv-overflow.ll
@@ -0,0 +1,125 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -disable-output "-passes=print<da>" 2>&1 \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-ALL
+; RUN: opt < %s -disable-output "-passes=print<da>" -da-enable-dependence-test=weak-crossing-siv 2>&1 \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-WEAK-CROSSING-SIV
+
+; max_i = INT64_MAX/3 // 3074457345618258602
+; for (long long i = 0; i <= max_i; i++) {
+; A[-3*i + INT64_MAX] = 0;
+; if (i)
+; A[3*i - 2] = 1;
+; }
+;
+; FIXME: DependenceAnalysis currently detects no dependency between
+; `A[-3*i + INT64_MAX]` and `A[3*i - 2]`, but it does exist. For example,
+;
+; memory access | i == 1 | i == max_i
+; ---------------------|------------------|------------------
+; A[-3*i + INT64_MAX] | A[INT64_MAX - 3] | A[1]
+; A[3*i - 2] | A[1] | A[INT64_MAX - 3]
+;
+; The root cause is that the calculation of the difference between the two
+; constants (INT64_MAX and -2) triggers an overflow.
+
+define void @weakcorssing_delta_ovfl(ptr %A) {
+; CHECK-ALL-LABEL: 'weakcorssing_delta_ovfl'
+; CHECK-ALL-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 0, ptr %idx.0, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 1, ptr %idx.1, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 1, ptr %idx.1, align 1 --> Dst: store i8 1, ptr %idx.1, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+;
+; CHECK-WEAK-CROSSING-SIV-LABEL: 'weakcorssing_delta_ovfl'
+; CHECK-WEAK-CROSSING-SIV-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 0, ptr %idx.0, align 1
+; CHECK-WEAK-CROSSING-SIV-NEXT: da analyze - consistent output [*]!
+; CHECK-WEAK-CROSSING-SIV-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 1, ptr %idx.1, align 1
+; CHECK-WEAK-CROSSING-SIV-NEXT: da analyze - none!
+; CHECK-WEAK-CROSSING-SIV-NEXT: Src: store i8 1, ptr %idx.1, align 1 --> Dst: store i8 1, ptr %idx.1, align 1
+; CHECK-WEAK-CROSSING-SIV-NEXT: da analyze - consistent output [*]!
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %i = phi i64 [ 0, %entry ], [ %i.inc, %loop.latch ]
+ %subscript.0 = phi i64 [ 9223372036854775807, %entry ], [ %subscript.0.next, %loop.latch ]
+ %subscript.1 = phi i64 [ -2, %entry ], [ %subscript.1.next, %loop.latch ]
+ %idx.0 = getelementptr inbounds i8, ptr %A, i64 %subscript.0
+ store i8 0, ptr %idx.0
+ %cond.store = icmp ne i64 %i, 0
+ br i1 %cond.store, label %if.store, label %loop.latch
+
+if.store:
+ %idx.1 = getelementptr inbounds i8, ptr %A, i64 %subscript.1
+ store i8 1, ptr %idx.1
+ br label %loop.latch
+
+loop.latch:
+ %i.inc = add nuw nsw i64 %i, 1
+ %subscript.0.next = add nsw i64 %subscript.0, -3
+ %subscript.1.next = add nsw i64 %subscript.1, 3
+ %ec = icmp sgt i64 %i.inc, 3074457345618258602
+ br i1 %ec, label %exit, label %loop.header
+
+exit:
+ ret void
+}
+
+; max_i = INT64_MAX/3 // 3074457345618258602
+; for (long long i = 0; i <= max_i; i++) {
+; A[-3*i + INT64_MAX] = 0;
+; A[3*i + 1] = 1;
+; }
+;
+; FIXME: DependenceAnalysis currently detects no dependency between
+; `A[-3*i + INT64_MAX]` and `A[3*i + 1]`, but it does exist. For example,
+;
+; memory access | i == 0 | i == 1 | i == max_i - 1 | i == max_i
+; ---------------------|--------|------------------|----------------|------------------
+; A[-3*i + INT64_MAX] | | A[INT64_MAX - 3] | A[1] |
+; A[3*i + 1] | A[1] | | | A[INT64_MAX - 3]
+;
+; The root cause is that the product of the BTC, the coefficient, and 2
+; triggers an overflow.
+;
+define void @weakcorssing_prod_ovfl(ptr %A) {
+; CHECK-ALL-LABEL: 'weakcorssing_prod_ovfl'
+; CHECK-ALL-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 0, ptr %idx.0, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 1, ptr %idx.1, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 1, ptr %idx.1, align 1 --> Dst: store i8 1, ptr %idx.1, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+;
+; CHECK-WEAK-CROSSING-SIV-LABEL: 'weakcorssing_prod_ovfl'
+; CHECK-WEAK-CROSSING-SIV-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 0, ptr %idx.0, align 1
+; CHECK-WEAK-CROSSING-SIV-NEXT: da analyze - consistent output [*]!
+; CHECK-WEAK-CROSSING-SIV-NEXT: Src: store i8 0, ptr %idx.0, align 1 --> Dst: store i8 1, ptr %idx.1, align 1
+; CHECK-WEAK-CROSSING-SIV-NEXT: da analyze - none!
+; CHECK-WEAK-CROSSING-SIV-NEXT: Src: store i8 1, ptr %idx.1, align 1 --> Dst: store i8 1, ptr %idx.1, align 1
+; CHECK-WEAK-CROSSING-SIV-NEXT: da analyze - consistent output [*]!
+;
+entry:
+ br label %loop
+
+loop:
+ %i = phi i64 [ 0, %entry ], [ %i.inc, %loop ]
+ %subscript.0 = phi i64 [ 9223372036854775807, %entry ], [ %subscript.0.next, %loop ]
+ %subscript.1 = phi i64 [ 1, %entry ], [ %subscript.1.next, %loop ]
+ %idx.0 = getelementptr inbounds i8, ptr %A, i64 %subscript.0
+ %idx.1 = getelementptr inbounds i8, ptr %A, i64 %subscript.1
+ store i8 0, ptr %idx.0
+ store i8 1, ptr %idx.1
+ %i.inc = add nuw nsw i64 %i, 1
+ %subscript.0.next = add nsw i64 %subscript.0, -3
+ %subscript.1.next = add nsw i64 %subscript.1, 3
+ %ec = icmp sgt i64 %i.inc, 3074457345618258602
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Analysis/DependenceAnalysis/weak-zero-siv-overflow.ll b/llvm/test/Analysis/DependenceAnalysis/weak-zero-siv-overflow.ll
new file mode 100644
index 0000000..6317c38
--- /dev/null
+++ b/llvm/test/Analysis/DependenceAnalysis/weak-zero-siv-overflow.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -disable-output "-passes=print<da>" 2>&1 \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-ALL
+; RUN: opt < %s -disable-output "-passes=print<da>" -da-enable-dependence-test=weak-zero-siv 2>&1 \
+; RUN: | FileCheck %s --check-prefixes=CHECK,CHECK-WEAK-ZERO-SIV
+
+; for (i = 0; i < (1LL << 62); i++) {
+; if (0 <= 2*i - 2)
+; A[2*i - 2] = 1;
+; A[2] = 2;
+; }
+;
+; FIXME: DependenceAnalysis currently detects no dependency between the two
+; stores, but it does exist. The root cause is that the product of the BTC and
+; the coefficient ((1LL << 62) - 1 and 2) overflows in a signed sense.
+;
+define void @weakzero_dst_siv_prod_ovfl(ptr %A) {
+; CHECK-ALL-LABEL: 'weakzero_dst_siv_prod_ovfl'
+; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-ALL-NEXT: da analyze - consistent output [S]!
+;
+; CHECK-WEAK-ZERO-SIV-LABEL: 'weakzero_dst_siv_prod_ovfl'
+; CHECK-WEAK-ZERO-SIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1
+; CHECK-WEAK-ZERO-SIV-NEXT: da analyze - consistent output [*]!
+; CHECK-WEAK-ZERO-SIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-WEAK-ZERO-SIV-NEXT: da analyze - none!
+; CHECK-WEAK-ZERO-SIV-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-WEAK-ZERO-SIV-NEXT: da analyze - consistent output [S]!
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %i = phi i64 [ 0, %entry ], [ %i.inc, %loop.latch ]
+ %offset = phi i64 [ -2, %entry ], [ %offset.next, %loop.latch ]
+ %ec = icmp eq i64 %i, 4611686018427387904
+ br i1 %ec, label %exit, label %loop.body
+
+loop.body:
+ %cond = icmp sge i64 %offset, 0
+ br i1 %cond, label %if.then, label %loop.latch
+
+if.then:
+ %gep.0 = getelementptr inbounds i8, ptr %A, i64 %offset
+ store i8 1, ptr %gep.0
+ br label %loop.latch
+
+loop.latch:
+ %gep.1 = getelementptr inbounds i8, ptr %A, i64 2
+ store i8 2, ptr %gep.1
+ %i.inc = add nuw nsw i64 %i, 1
+ %offset.next = add nsw i64 %offset, 2
+ br label %loop.header
+
+exit:
+ ret void
+}
+
+; for (i = 0; i < n; i++) {
+; if (0 <= 2*i - 1)
+; A[2*i - 1] = 1;
+; A[INT64_MAX] = 2;
+; }
+;
+; FIXME: DependenceAnalysis currently detects no dependency between the two
+; stores, but it does exist. When `%n` is 2^62, the value of `%offset` will be
+; the same as INT64_MAX at the last iteration.
+; The root cause is that the calculation of the difference between the two
+; constants (INT64_MAX and -1) overflows in a signed sense.
+;
+define void @weakzero_dst_siv_delta_ovfl(ptr %A, i64 %n) {
+; CHECK-ALL-LABEL: 'weakzero_dst_siv_delta_ovfl'
+; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-ALL-NEXT: da analyze - none!
+; CHECK-ALL-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-ALL-NEXT: da analyze - consistent output [S]!
+;
+; CHECK-WEAK-ZERO-SIV-LABEL: 'weakzero_dst_siv_delta_ovfl'
+; CHECK-WEAK-ZERO-SIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 1, ptr %gep.0, align 1
+; CHECK-WEAK-ZERO-SIV-NEXT: da analyze - consistent output [*]!
+; CHECK-WEAK-ZERO-SIV-NEXT: Src: store i8 1, ptr %gep.0, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-WEAK-ZERO-SIV-NEXT: da analyze - none!
+; CHECK-WEAK-ZERO-SIV-NEXT: Src: store i8 2, ptr %gep.1, align 1 --> Dst: store i8 2, ptr %gep.1, align 1
+; CHECK-WEAK-ZERO-SIV-NEXT: da analyze - consistent output [S]!
+;
+entry:
+ %guard = icmp sgt i64 %n, 0
+ br i1 %guard, label %loop.header, label %exit
+
+loop.header:
+ %i = phi i64 [ 0, %entry ], [ %i.inc, %loop.latch ]
+ %offset = phi i64 [ -2, %entry ], [ %offset.next, %loop.latch ]
+ %ec = icmp eq i64 %i, %n
+ br i1 %ec, label %exit, label %loop.body
+
+loop.body:
+ %cond = icmp sge i64 %offset, 0
+ br i1 %cond, label %if.then, label %loop.latch
+
+if.then:
+ %gep.0 = getelementptr inbounds i8, ptr %A, i64 %offset
+ store i8 1, ptr %gep.0
+ br label %loop.latch
+
+loop.latch:
+ %gep.1 = getelementptr inbounds i8, ptr %A, i64 9223372036854775807
+ store i8 2, ptr %gep.1
+ %i.inc = add nuw nsw i64 %i, 1
+ %offset.next = add nsw i64 %offset, 2
+ br label %loop.header
+
+exit:
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-srl-and.ll b/llvm/test/CodeGen/AArch64/arm64-srl-and.ll
index b58f6ba..330f27b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-srl-and.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-srl-and.ll
@@ -1,22 +1,38 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-linux-gnu -O3 < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -O3 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-linux-gnu -O3 -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; This used to miscompile:
; The 16-bit -1 should not become 32-bit -1 (sub w8, w8, #1).
@g = global i16 0, align 4
define i32 @srl_and() {
-; CHECK-LABEL: srl_and:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: adrp x8, :got:g
-; CHECK-NEXT: mov w9, #50
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:g]
-; CHECK-NEXT: ldrh w8, [x8]
-; CHECK-NEXT: eor w8, w8, w9
-; CHECK-NEXT: mov w9, #65535
-; CHECK-NEXT: add w8, w8, w9
-; CHECK-NEXT: and w0, w8, w8, lsr #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: srl_and:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, :got:g
+; CHECK-SD-NEXT: mov w9, #50 // =0x32
+; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:g]
+; CHECK-SD-NEXT: ldrh w8, [x8]
+; CHECK-SD-NEXT: eor w8, w8, w9
+; CHECK-SD-NEXT: mov w9, #65535 // =0xffff
+; CHECK-SD-NEXT: add w8, w8, w9
+; CHECK-SD-NEXT: and w0, w8, w8, lsr #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: srl_and:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, :got:g
+; CHECK-GI-NEXT: mov w9, #50 // =0x32
+; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:g]
+; CHECK-GI-NEXT: ldrh w8, [x8]
+; CHECK-GI-NEXT: eor w8, w8, w9
+; CHECK-GI-NEXT: mov w9, #65535 // =0xffff
+; CHECK-GI-NEXT: add w8, w9, w8, uxth
+; CHECK-GI-NEXT: and w9, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, w9
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: and w0, w9, w8
+; CHECK-GI-NEXT: ret
entry:
%0 = load i16, ptr @g, align 4
%1 = xor i16 %0, 50
@@ -29,3 +45,5 @@ entry:
ret i32 %and
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
index c3fdc7d..8438f0b0 100644
--- a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-unknown-unknown < %s | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=aarch64-unknown-unknown < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-unknown-unknown -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; We are looking for the following pattern here:
; (X & (C l>> Y)) ==/!= 0
@@ -13,12 +14,21 @@
; i8 scalar
define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_signbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl w8, w0, w1
-; CHECK-NEXT: tst w8, #0x80
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_signbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x80
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_signbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #128 // =0x80
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsr w8, w8, w9
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i8 128, %y
%t1 = and i8 %t0, %x
%res = icmp eq i8 %t1, 0
@@ -26,12 +36,21 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
}
define i1 @scalar_i8_lowestbit_eq(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_lowestbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl w8, w0, w1
-; CHECK-NEXT: tst w8, #0x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_lowestbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_lowestbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsr w8, w8, w9
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i8 1, %y
%t1 = and i8 %t0, %x
%res = icmp eq i8 %t1, 0
@@ -39,12 +58,21 @@ define i1 @scalar_i8_lowestbit_eq(i8 %x, i8 %y) nounwind {
}
define i1 @scalar_i8_bitsinmiddle_eq(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_bitsinmiddle_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl w8, w0, w1
-; CHECK-NEXT: tst w8, #0x18
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_bitsinmiddle_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x18
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_bitsinmiddle_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #24 // =0x18
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsr w8, w8, w9
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i8 24, %y
%t1 = and i8 %t0, %x
%res = icmp eq i8 %t1, 0
@@ -54,12 +82,21 @@ define i1 @scalar_i8_bitsinmiddle_eq(i8 %x, i8 %y) nounwind {
; i16 scalar
define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
-; CHECK-LABEL: scalar_i16_signbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl w8, w0, w1
-; CHECK-NEXT: tst w8, #0x8000
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i16_signbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x8000
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i16_signbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #32768 // =0x8000
+; CHECK-GI-NEXT: and w9, w1, #0xffff
+; CHECK-GI-NEXT: lsr w8, w8, w9
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i16 32768, %y
%t1 = and i16 %t0, %x
%res = icmp eq i16 %t1, 0
@@ -67,12 +104,21 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
}
define i1 @scalar_i16_lowestbit_eq(i16 %x, i16 %y) nounwind {
-; CHECK-LABEL: scalar_i16_lowestbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl w8, w0, w1
-; CHECK-NEXT: tst w8, #0x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i16_lowestbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i16_lowestbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: and w9, w1, #0xffff
+; CHECK-GI-NEXT: lsr w8, w8, w9
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i16 1, %y
%t1 = and i16 %t0, %x
%res = icmp eq i16 %t1, 0
@@ -80,12 +126,21 @@ define i1 @scalar_i16_lowestbit_eq(i16 %x, i16 %y) nounwind {
}
define i1 @scalar_i16_bitsinmiddle_eq(i16 %x, i16 %y) nounwind {
-; CHECK-LABEL: scalar_i16_bitsinmiddle_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl w8, w0, w1
-; CHECK-NEXT: tst w8, #0xff0
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i16_bitsinmiddle_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0xff0
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i16_bitsinmiddle_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4080 // =0xff0
+; CHECK-GI-NEXT: and w9, w1, #0xffff
+; CHECK-GI-NEXT: lsr w8, w8, w9
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i16 4080, %y
%t1 = and i16 %t0, %x
%res = icmp eq i16 %t1, 0
@@ -95,12 +150,20 @@ define i1 @scalar_i16_bitsinmiddle_eq(i16 %x, i16 %y) nounwind {
; i32 scalar
define i1 @scalar_i32_signbit_eq(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scalar_i32_signbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl w8, w0, w1
-; CHECK-NEXT: tst w8, #0x80000000
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i32_signbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x80000000
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i32_signbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-2147483648 // =0x80000000
+; CHECK-GI-NEXT: lsr w8, w8, w1
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i32 2147483648, %y
%t1 = and i32 %t0, %x
%res = icmp eq i32 %t1, 0
@@ -108,12 +171,20 @@ define i1 @scalar_i32_signbit_eq(i32 %x, i32 %y) nounwind {
}
define i1 @scalar_i32_lowestbit_eq(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scalar_i32_lowestbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl w8, w0, w1
-; CHECK-NEXT: tst w8, #0x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i32_lowestbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i32_lowestbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: lsr w8, w8, w1
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i32 1, %y
%t1 = and i32 %t0, %x
%res = icmp eq i32 %t1, 0
@@ -121,12 +192,20 @@ define i1 @scalar_i32_lowestbit_eq(i32 %x, i32 %y) nounwind {
}
define i1 @scalar_i32_bitsinmiddle_eq(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scalar_i32_bitsinmiddle_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl w8, w0, w1
-; CHECK-NEXT: tst w8, #0xffff00
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i32_bitsinmiddle_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0xffff00
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i32_bitsinmiddle_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #16776960 // =0xffff00
+; CHECK-GI-NEXT: lsr w8, w8, w1
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i32 16776960, %y
%t1 = and i32 %t0, %x
%res = icmp eq i32 %t1, 0
@@ -136,12 +215,20 @@ define i1 @scalar_i32_bitsinmiddle_eq(i32 %x, i32 %y) nounwind {
; i64 scalar
define i1 @scalar_i64_signbit_eq(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scalar_i64_signbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl x8, x0, x1
-; CHECK-NEXT: tst x8, #0x8000000000000000
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i64_signbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl x8, x0, x1
+; CHECK-SD-NEXT: tst x8, #0x8000000000000000
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i64_signbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000
+; CHECK-GI-NEXT: lsr x8, x8, x1
+; CHECK-GI-NEXT: tst x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i64 9223372036854775808, %y
%t1 = and i64 %t0, %x
%res = icmp eq i64 %t1, 0
@@ -149,12 +236,20 @@ define i1 @scalar_i64_signbit_eq(i64 %x, i64 %y) nounwind {
}
define i1 @scalar_i64_lowestbit_eq(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scalar_i64_lowestbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl x8, x0, x1
-; CHECK-NEXT: tst x8, #0x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i64_lowestbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl x8, x0, x1
+; CHECK-SD-NEXT: tst x8, #0x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i64_lowestbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: lsr x8, x8, x1
+; CHECK-GI-NEXT: tst x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i64 1, %y
%t1 = and i64 %t0, %x
%res = icmp eq i64 %t1, 0
@@ -162,12 +257,20 @@ define i1 @scalar_i64_lowestbit_eq(i64 %x, i64 %y) nounwind {
}
define i1 @scalar_i64_bitsinmiddle_eq(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scalar_i64_bitsinmiddle_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl x8, x0, x1
-; CHECK-NEXT: tst x8, #0xffffffff0000
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i64_bitsinmiddle_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl x8, x0, x1
+; CHECK-SD-NEXT: tst x8, #0xffffffff0000
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i64_bitsinmiddle_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #281474976645120 // =0xffffffff0000
+; CHECK-GI-NEXT: lsr x8, x8, x1
+; CHECK-GI-NEXT: tst x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i64 281474976645120, %y
%t1 = and i64 %t0, %x
%res = icmp eq i64 %t1, 0
@@ -179,14 +282,24 @@ define i1 @scalar_i64_bitsinmiddle_eq(i64 %x, i64 %y) nounwind {
;------------------------------------------------------------------------------;
define <4 x i1> @vec_4xi32_splat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_splat_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: ushl v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_splat_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: ushl v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_splat_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: movi v2.4s, #1
+; CHECK-GI-NEXT: neg v1.4s, v1.4s
+; CHECK-GI-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
%t0 = lshr <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
%t1 = and <4 x i32> %t0, %x
%res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
@@ -211,44 +324,86 @@ define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
}
define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_nonsplat_undef0_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: ushl v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v0.16b, v2.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_nonsplat_undef0_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: ushl v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_nonsplat_undef0_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: neg v1.4s, v1.4s
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: mov v2.s[1], w8
+; CHECK-GI-NEXT: mov v2.s[3], w8
+; CHECK-GI-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
%t0 = lshr <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
%t1 = and <4 x i32> %t0, %x
%res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
ret <4 x i1> %res
}
define <4 x i1> @vec_4xi32_nonsplat_undef1_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_nonsplat_undef1_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: neg v1.4s, v1.4s
-; CHECK-NEXT: ushl v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_nonsplat_undef1_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: neg v1.4s, v1.4s
+; CHECK-SD-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_nonsplat_undef1_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: movi d2, #0000000000000000
+; CHECK-GI-NEXT: movi v3.4s, #1
+; CHECK-GI-NEXT: neg v1.4s, v1.4s
+; CHECK-GI-NEXT: mov v2.s[1], wzr
+; CHECK-GI-NEXT: ushl v1.4s, v3.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: mov v2.s[3], wzr
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, v2.4s
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
%t0 = lshr <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
%t1 = and <4 x i32> %t0, %x
%res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
ret <4 x i1> %res
}
define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_nonsplat_undef2_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: neg v1.4s, v1.4s
-; CHECK-NEXT: ushl v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_nonsplat_undef2_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: neg v1.4s, v1.4s
+; CHECK-SD-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_nonsplat_undef2_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: movi d2, #0000000000000000
+; CHECK-GI-NEXT: neg v1.4s, v1.4s
+; CHECK-GI-NEXT: fmov s3, w8
+; CHECK-GI-NEXT: mov v3.s[1], w8
+; CHECK-GI-NEXT: mov v2.s[1], wzr
+; CHECK-GI-NEXT: mov v3.s[3], w8
+; CHECK-GI-NEXT: mov v2.s[3], wzr
+; CHECK-GI-NEXT: ushl v1.4s, v3.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, v2.4s
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
%t0 = lshr <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
%t1 = and <4 x i32> %t0, %x
%res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
@@ -260,11 +415,20 @@ define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwi
;------------------------------------------------------------------------------;
define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_signbit_ne:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsl w8, w0, w1
-; CHECK-NEXT: ubfx w0, w8, #7, #1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_signbit_ne:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsl w8, w0, w1
+; CHECK-SD-NEXT: ubfx w0, w8, #7, #1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_signbit_ne:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #128 // =0x80
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsr w8, w8, w9
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
%t0 = lshr i8 128, %y
%t1 = and i8 %t0, %x
%res = icmp ne i8 %t1, 0 ; we are perfectly happy with 'ne' predicate
@@ -315,14 +479,24 @@ define i1 @scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
}
define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_signbit_eq_with_nonzero:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #128 // =0x80
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: and w8, w8, w0
-; CHECK-NEXT: cmp w8, #1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_signbit_eq_with_nonzero:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #128 // =0x80
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: and w8, w8, w0
+; CHECK-SD-NEXT: cmp w8, #1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_signbit_eq_with_nonzero:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #128 // =0x80
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsr w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: cmp w8, #1
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = lshr i8 128, %y
%t1 = and i8 %t0, %x
%res = icmp eq i8 %t1, 1 ; should be comparing with 0
diff --git a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
index 4a73b10..cc1bf27 100644
--- a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-unknown-unknown < %s | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=aarch64-unknown-unknown < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-unknown-unknown -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; We are looking for the following pattern here:
; (X & (C << Y)) ==/!= 0
@@ -13,13 +14,23 @@
; i8 scalar
define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_signbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: tst w8, #0x80
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_signbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: tst w8, #0x80
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_signbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xff
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = shl i8 128, %y
%t1 = and i8 %t0, %x
%res = icmp eq i8 %t1, 0
@@ -27,13 +38,23 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
}
define i1 @scalar_i8_lowestbit_eq(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_lowestbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: tst w8, #0x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_lowestbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: tst w8, #0x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_lowestbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xff
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = shl i8 1, %y
%t1 = and i8 %t0, %x
%res = icmp eq i8 %t1, 0
@@ -41,13 +62,23 @@ define i1 @scalar_i8_lowestbit_eq(i8 %x, i8 %y) nounwind {
}
define i1 @scalar_i8_bitsinmiddle_eq(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_bitsinmiddle_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: tst w8, #0x18
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_bitsinmiddle_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: tst w8, #0x18
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_bitsinmiddle_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #24 // =0x18
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xff
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = shl i8 24, %y
%t1 = and i8 %t0, %x
%res = icmp eq i8 %t1, 0
@@ -57,13 +88,23 @@ define i1 @scalar_i8_bitsinmiddle_eq(i8 %x, i8 %y) nounwind {
; i16 scalar
define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
-; CHECK-LABEL: scalar_i16_signbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: tst w8, #0x8000
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i16_signbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: tst w8, #0x8000
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i16_signbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-32768 // =0xffff8000
+; CHECK-GI-NEXT: and w9, w1, #0xffff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xffff
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = shl i16 32768, %y
%t1 = and i16 %t0, %x
%res = icmp eq i16 %t1, 0
@@ -71,13 +112,23 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
}
define i1 @scalar_i16_lowestbit_eq(i16 %x, i16 %y) nounwind {
-; CHECK-LABEL: scalar_i16_lowestbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: tst w8, #0x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i16_lowestbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: tst w8, #0x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i16_lowestbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: and w9, w1, #0xffff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xffff
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = shl i16 1, %y
%t1 = and i16 %t0, %x
%res = icmp eq i16 %t1, 0
@@ -85,13 +136,23 @@ define i1 @scalar_i16_lowestbit_eq(i16 %x, i16 %y) nounwind {
}
define i1 @scalar_i16_bitsinmiddle_eq(i16 %x, i16 %y) nounwind {
-; CHECK-LABEL: scalar_i16_bitsinmiddle_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: tst w8, #0xff0
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i16_bitsinmiddle_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: tst w8, #0xff0
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i16_bitsinmiddle_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4080 // =0xff0
+; CHECK-GI-NEXT: and w9, w1, #0xffff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xffff
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = shl i16 4080, %y
%t1 = and i16 %t0, %x
%res = icmp eq i16 %t1, 0
@@ -101,12 +162,20 @@ define i1 @scalar_i16_bitsinmiddle_eq(i16 %x, i16 %y) nounwind {
; i32 scalar
define i1 @scalar_i32_signbit_eq(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scalar_i32_signbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsr w8, w0, w1
-; CHECK-NEXT: tst w8, #0x80000000
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i32_signbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x80000000
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i32_signbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-2147483648 // =0x80000000
+; CHECK-GI-NEXT: lsl w8, w8, w1
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = shl i32 2147483648, %y
%t1 = and i32 %t0, %x
%res = icmp eq i32 %t1, 0
@@ -114,12 +183,20 @@ define i1 @scalar_i32_signbit_eq(i32 %x, i32 %y) nounwind {
}
define i1 @scalar_i32_lowestbit_eq(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scalar_i32_lowestbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsr w8, w0, w1
-; CHECK-NEXT: tst w8, #0x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i32_lowestbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i32_lowestbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: lsl w8, w8, w1
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = shl i32 1, %y
%t1 = and i32 %t0, %x
%res = icmp eq i32 %t1, 0
@@ -127,12 +204,20 @@ define i1 @scalar_i32_lowestbit_eq(i32 %x, i32 %y) nounwind {
}
define i1 @scalar_i32_bitsinmiddle_eq(i32 %x, i32 %y) nounwind {
-; CHECK-LABEL: scalar_i32_bitsinmiddle_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsr w8, w0, w1
-; CHECK-NEXT: tst w8, #0xffff00
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i32_bitsinmiddle_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0xffff00
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i32_bitsinmiddle_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #16776960 // =0xffff00
+; CHECK-GI-NEXT: lsl w8, w8, w1
+; CHECK-GI-NEXT: tst w8, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = shl i32 16776960, %y
%t1 = and i32 %t0, %x
%res = icmp eq i32 %t1, 0
@@ -142,12 +227,20 @@ define i1 @scalar_i32_bitsinmiddle_eq(i32 %x, i32 %y) nounwind {
; i64 scalar
define i1 @scalar_i64_signbit_eq(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scalar_i64_signbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: tst x8, #0x8000000000000000
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i64_signbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr x8, x0, x1
+; CHECK-SD-NEXT: tst x8, #0x8000000000000000
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i64_signbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000
+; CHECK-GI-NEXT: lsl x8, x8, x1
+; CHECK-GI-NEXT: tst x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = shl i64 9223372036854775808, %y
%t1 = and i64 %t0, %x
%res = icmp eq i64 %t1, 0
@@ -155,12 +248,20 @@ define i1 @scalar_i64_signbit_eq(i64 %x, i64 %y) nounwind {
}
define i1 @scalar_i64_lowestbit_eq(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scalar_i64_lowestbit_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: tst x8, #0x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i64_lowestbit_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr x8, x0, x1
+; CHECK-SD-NEXT: tst x8, #0x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i64_lowestbit_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: lsl x8, x8, x1
+; CHECK-GI-NEXT: tst x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = shl i64 1, %y
%t1 = and i64 %t0, %x
%res = icmp eq i64 %t1, 0
@@ -168,12 +269,20 @@ define i1 @scalar_i64_lowestbit_eq(i64 %x, i64 %y) nounwind {
}
define i1 @scalar_i64_bitsinmiddle_eq(i64 %x, i64 %y) nounwind {
-; CHECK-LABEL: scalar_i64_bitsinmiddle_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: tst x8, #0xffffffff0000
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i64_bitsinmiddle_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: lsr x8, x0, x1
+; CHECK-SD-NEXT: tst x8, #0xffffffff0000
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i64_bitsinmiddle_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #281474976645120 // =0xffffffff0000
+; CHECK-GI-NEXT: lsl x8, x8, x1
+; CHECK-GI-NEXT: tst x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%t0 = shl i64 281474976645120, %y
%t1 = and i64 %t0, %x
%res = icmp eq i64 %t1, 0
@@ -216,42 +325,81 @@ define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
}
define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_nonsplat_undef0_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: ushl v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_nonsplat_undef0_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_nonsplat_undef0_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: mov v2.s[1], w8
+; CHECK-GI-NEXT: mov v2.s[3], w8
+; CHECK-GI-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
%t0 = shl <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
%t1 = and <4 x i32> %t0, %x
%res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 0, i32 0>
ret <4 x i1> %res
}
define <4 x i1> @vec_4xi32_nonsplat_undef1_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_nonsplat_undef1_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: ushl v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_nonsplat_undef1_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_nonsplat_undef1_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: movi d3, #0000000000000000
+; CHECK-GI-NEXT: movi v2.4s, #1
+; CHECK-GI-NEXT: mov v3.s[1], wzr
+; CHECK-GI-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, v3.4s
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
%t0 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y
%t1 = and <4 x i32> %t0, %x
%res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
ret <4 x i1> %res
}
define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
-; CHECK-LABEL: vec_4xi32_nonsplat_undef2_eq:
-; CHECK: // %bb.0:
-; CHECK-NEXT: movi v2.4s, #1
-; CHECK-NEXT: ushl v1.4s, v2.4s, v1.4s
-; CHECK-NEXT: and v0.16b, v1.16b, v0.16b
-; CHECK-NEXT: cmeq v0.4s, v0.4s, #0
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: vec_4xi32_nonsplat_undef2_eq:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v2.4s, #1
+; CHECK-SD-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-SD-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-SD-NEXT: cmeq v0.4s, v0.4s, #0
+; CHECK-SD-NEXT: xtn v0.4h, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: vec_4xi32_nonsplat_undef2_eq:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #1 // =0x1
+; CHECK-GI-NEXT: movi d3, #0000000000000000
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: mov v2.s[1], w8
+; CHECK-GI-NEXT: mov v3.s[1], wzr
+; CHECK-GI-NEXT: mov v2.s[3], w8
+; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: ushl v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: and v0.16b, v1.16b, v0.16b
+; CHECK-GI-NEXT: cmeq v0.4s, v0.4s, v3.4s
+; CHECK-GI-NEXT: xtn v0.4h, v0.4s
+; CHECK-GI-NEXT: ret
%t0 = shl <4 x i32> <i32 1, i32 1, i32 undef, i32 1>, %y
%t1 = and <4 x i32> %t0, %x
%res = icmp eq <4 x i32> %t1, <i32 0, i32 0, i32 undef, i32 0>
@@ -263,12 +411,22 @@ define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwi
;------------------------------------------------------------------------------;
define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_signbit_ne:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xff
-; CHECK-NEXT: lsr w8, w8, w1
-; CHECK-NEXT: lsr w0, w8, #7
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_signbit_ne:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xff
+; CHECK-SD-NEXT: lsr w8, w8, w1
+; CHECK-SD-NEXT: lsr w0, w8, #7
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_signbit_ne:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: tst w8, #0xff
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
%t0 = shl i8 128, %y
%t1 = and i8 %t0, %x
%res = icmp ne i8 %t1, 0 ; we are perfectly happy with 'ne' predicate
@@ -310,13 +468,24 @@ define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
}
define i1 @scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_bitsinmiddle_slt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #24 // =0x18
-; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: and w8, w8, w0
-; CHECK-NEXT: ubfx w0, w8, #7, #1
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_bitsinmiddle_slt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #24 // =0x18
+; CHECK-SD-NEXT: lsl w8, w8, w1
+; CHECK-SD-NEXT: and w8, w8, w0
+; CHECK-SD-NEXT: ubfx w0, w8, #7, #1
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_bitsinmiddle_slt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #24 // =0x18
+; CHECK-GI-NEXT: and w9, w1, #0xff
+; CHECK-GI-NEXT: lsl w8, w8, w9
+; CHECK-GI-NEXT: and w8, w8, w0
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: cset w0, mi
+; CHECK-GI-NEXT: ret
%t0 = shl i8 24, %y
%t1 = and i8 %t0, %x
%res = icmp slt i8 %t1, 0
@@ -324,15 +493,20 @@ define i1 @scalar_i8_bitsinmiddle_slt(i8 %x, i8 %y) nounwind {
}
define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
-; CHECK-LABEL: scalar_i8_signbit_eq_with_nonzero:
-; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #-128 // =0xffffff80
-; CHECK-NEXT: lsl w8, w8, w1
-; CHECK-NEXT: and w8, w8, w0
-; CHECK-NEXT: and w8, w8, #0x80
-; CHECK-NEXT: cmp w8, #1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: scalar_i8_signbit_eq_with_nonzero:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-SD-NEXT: lsl w8, w8, w1
+; CHECK-SD-NEXT: and w8, w8, w0
+; CHECK-SD-NEXT: and w8, w8, #0x80
+; CHECK-SD-NEXT: cmp w8, #1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: scalar_i8_signbit_eq_with_nonzero:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: ret
%t0 = shl i8 128, %y
%t1 = and i8 %t0, %x
%res = icmp eq i8 %t1, 1 ; should be comparing with 0
diff --git a/llvm/test/CodeGen/AArch64/signbit-test.ll b/llvm/test/CodeGen/AArch64/signbit-test.ll
index c74a934..298495b 100644
--- a/llvm/test/CodeGen/AArch64/signbit-test.ll
+++ b/llvm/test/CodeGen/AArch64/signbit-test.ll
@@ -1,13 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
+; RUN: llc -mtriple=aarch64-- < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-- -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i64 @test_clear_mask_i64_i32(i64 %x) nounwind {
-; CHECK-LABEL: test_clear_mask_i64_i32:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42 // =0x2a
-; CHECK-NEXT: cmn w0, #1
-; CHECK-NEXT: csel x0, x8, x0, gt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_clear_mask_i64_i32:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: mov w8, #42 // =0x2a
+; CHECK-SD-NEXT: cmn w0, #1
+; CHECK-SD-NEXT: csel x0, x8, x0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_clear_mask_i64_i32:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: mov w8, #42 // =0x2a
+; CHECK-GI-NEXT: tst x0, #0x80000000
+; CHECK-GI-NEXT: csel x0, x8, x0, eq
+; CHECK-GI-NEXT: ret
entry:
%a = and i64 %x, 2147483648
%r = icmp eq i64 %a, 0
diff --git a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
index 7c80f93..fc01c6b 100644
--- a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
+++ b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
; https://bugs.llvm.org/show_bug.cgi?id=38149
@@ -19,13 +20,22 @@
; ---------------------------------------------------------------------------- ;
define i1 @shifts_eqcmp_i16_i8(i16 %x) nounwind {
-; CHECK-LABEL: shifts_eqcmp_i16_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: and w8, w8, #0xffff
-; CHECK-NEXT: cmp w8, w0, uxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: shifts_eqcmp_i16_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sxtb w8, w0
+; CHECK-SD-NEXT: and w8, w8, #0xffff
+; CHECK-SD-NEXT: cmp w8, w0, uxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: shifts_eqcmp_i16_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: lsl w8, w0, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, w0, uxth
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
%tmp0 = shl i16 %x, 8 ; 16-8
%tmp1 = ashr exact i16 %tmp0, 8 ; 16-8
%tmp2 = icmp eq i16 %tmp1, %x
@@ -97,26 +107,43 @@ define i1 @shifts_eqcmp_i64_i8(i64 %x) nounwind {
; ---------------------------------------------------------------------------- ;
define i1 @add_ugecmp_i16_i8(i16 %x) nounwind {
-; CHECK-LABEL: add_ugecmp_i16_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w8, w8, #128
-; CHECK-NEXT: lsr w8, w8, #8
-; CHECK-NEXT: cmp w8, #254
-; CHECK-NEXT: cset w0, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i16_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: sub w8, w8, #128
+; CHECK-SD-NEXT: lsr w8, w8, #8
+; CHECK-SD-NEXT: cmp w8, #254
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i16_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-GI-NEXT: mov w9, #65280 // =0xff00
+; CHECK-GI-NEXT: add w8, w8, w0, uxth
+; CHECK-GI-NEXT: cmp w8, w9
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%tmp0 = add i16 %x, -128 ; ~0U << (8-1)
%tmp1 = icmp uge i16 %tmp0, -256 ; ~0U << 8
ret i1 %tmp1
}
define i1 @add_ugecmp_i32_i16_i8(i16 %xx) nounwind {
-; CHECK-LABEL: add_ugecmp_i32_i16_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: cmp w8, w8, sxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i32_i16_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: cmp w8, w8, sxtb
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i32_i16_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-GI-NEXT: add w8, w8, w0, uxth
+; CHECK-GI-NEXT: cmn w8, #256
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%x = zext i16 %xx to i32
%tmp0 = add i32 %x, -128 ; ~0U << (8-1)
%tmp1 = icmp uge i32 %tmp0, -256 ; ~0U << 8
@@ -124,55 +151,92 @@ define i1 @add_ugecmp_i32_i16_i8(i16 %xx) nounwind {
}
define i1 @add_ugecmp_i32_i16(i32 %x) nounwind {
-; CHECK-LABEL: add_ugecmp_i32_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp w0, w0, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i32_i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w0, sxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i32_i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub w8, w0, #8, lsl #12 // =32768
+; CHECK-GI-NEXT: cmn w8, #16, lsl #12 // =65536
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%tmp0 = add i32 %x, -32768 ; ~0U << (16-1)
%tmp1 = icmp uge i32 %tmp0, -65536 ; ~0U << 16
ret i1 %tmp1
}
define i1 @add_ugecmp_i32_i8(i32 %x) nounwind {
-; CHECK-LABEL: add_ugecmp_i32_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp w0, w0, sxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i32_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w0, sxtb
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i32_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub w8, w0, #128
+; CHECK-GI-NEXT: cmn w8, #256
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%tmp0 = add i32 %x, -128 ; ~0U << (8-1)
%tmp1 = icmp uge i32 %tmp0, -256 ; ~0U << 8
ret i1 %tmp1
}
define i1 @add_ugecmp_i64_i32(i64 %x) nounwind {
-; CHECK-LABEL: add_ugecmp_i64_i32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, w0, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i64_i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, w0, sxtw
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i64_i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-2147483648 // =0xffffffff80000000
+; CHECK-GI-NEXT: mov x9, #-4294967296 // =0xffffffff00000000
+; CHECK-GI-NEXT: add x8, x0, x8
+; CHECK-GI-NEXT: cmp x8, x9
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%tmp0 = add i64 %x, -2147483648 ; ~0U << (32-1)
%tmp1 = icmp uge i64 %tmp0, -4294967296 ; ~0U << 32
ret i1 %tmp1
}
define i1 @add_ugecmp_i64_i16(i64 %x) nounwind {
-; CHECK-LABEL: add_ugecmp_i64_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, w0, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i64_i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, w0, sxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i64_i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub x8, x0, #8, lsl #12 // =32768
+; CHECK-GI-NEXT: cmn x8, #16, lsl #12 // =65536
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%tmp0 = add i64 %x, -32768 ; ~0U << (16-1)
%tmp1 = icmp uge i64 %tmp0, -65536 ; ~0U << 16
ret i1 %tmp1
}
define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
-; CHECK-LABEL: add_ugecmp_i64_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, w0, sxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugecmp_i64_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, w0, sxtb
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugecmp_i64_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: sub x8, x0, #128
+; CHECK-GI-NEXT: cmn x8, #256
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%tmp0 = add i64 %x, -128 ; ~0U << (8-1)
%tmp1 = icmp uge i64 %tmp0, -256 ; ~0U << 8
ret i1 %tmp1
@@ -180,14 +244,23 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
; Slightly more canonical variant
define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
-; CHECK-LABEL: add_ugtcmp_i16_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: sub w8, w8, #128
-; CHECK-NEXT: lsr w8, w8, #8
-; CHECK-NEXT: cmp w8, #254
-; CHECK-NEXT: cset w0, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ugtcmp_i16_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: sub w8, w8, #128
+; CHECK-SD-NEXT: lsr w8, w8, #8
+; CHECK-SD-NEXT: cmp w8, #254
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ugtcmp_i16_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-128 // =0xffffff80
+; CHECK-GI-NEXT: mov w9, #65279 // =0xfeff
+; CHECK-GI-NEXT: add w8, w8, w0, uxth
+; CHECK-GI-NEXT: cmp w8, w9
+; CHECK-GI-NEXT: cset w0, hi
+; CHECK-GI-NEXT: ret
%tmp0 = add i16 %x, -128 ; ~0U << (8-1)
%tmp1 = icmp ugt i16 %tmp0, -257 ; ~0U << 8 - 1
ret i1 %tmp1
@@ -198,68 +271,113 @@ define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
; ---------------------------------------------------------------------------- ;
define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_i16_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: and w8, w8, #0xffff
-; CHECK-NEXT: cmp w8, w0, uxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_i16_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sxtb w8, w0
+; CHECK-SD-NEXT: and w8, w8, #0xffff
+; CHECK-SD-NEXT: cmp w8, w0, uxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_i16_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #128
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%tmp0 = add i16 %x, 128 ; 1U << (8-1)
%tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
ret i1 %tmp1
}
define i1 @add_ultcmp_i32_i16(i32 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_i32_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp w0, w0, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_i32_i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w0, sxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_i32_i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #8, lsl #12 // =32768
+; CHECK-GI-NEXT: cmp w8, #16, lsl #12 // =65536
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%tmp0 = add i32 %x, 32768 ; 1U << (16-1)
%tmp1 = icmp ult i32 %tmp0, 65536 ; 1U << 16
ret i1 %tmp1
}
define i1 @add_ultcmp_i32_i8(i32 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_i32_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp w0, w0, sxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_i32_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w0, sxtb
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_i32_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #128
+; CHECK-GI-NEXT: cmp w8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%tmp0 = add i32 %x, 128 ; 1U << (8-1)
%tmp1 = icmp ult i32 %tmp0, 256 ; 1U << 8
ret i1 %tmp1
}
define i1 @add_ultcmp_i64_i32(i64 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_i64_i32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, w0, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_i64_i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, w0, sxtw
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_i64_i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-2147483648 // =0x80000000
+; CHECK-GI-NEXT: mov x9, #4294967296 // =0x100000000
+; CHECK-GI-NEXT: add x8, x0, x8
+; CHECK-GI-NEXT: cmp x8, x9
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%tmp0 = add i64 %x, 2147483648 ; 1U << (32-1)
%tmp1 = icmp ult i64 %tmp0, 4294967296 ; 1U << 32
ret i1 %tmp1
}
define i1 @add_ultcmp_i64_i16(i64 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_i64_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, w0, sxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_i64_i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, w0, sxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_i64_i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add x8, x0, #8, lsl #12 // =32768
+; CHECK-GI-NEXT: cmp x8, #16, lsl #12 // =65536
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%tmp0 = add i64 %x, 32768 ; 1U << (16-1)
%tmp1 = icmp ult i64 %tmp0, 65536 ; 1U << 16
ret i1 %tmp1
}
define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_i64_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmp x0, w0, sxtb
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_i64_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, w0, sxtb
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_i64_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add x8, x0, #128
+; CHECK-GI-NEXT: cmp x8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%tmp0 = add i64 %x, 128 ; 1U << (8-1)
%tmp1 = icmp ult i64 %tmp0, 256 ; 1U << 8
ret i1 %tmp1
@@ -267,13 +385,21 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
; Slightly more canonical variant
define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
-; CHECK-LABEL: add_ulecmp_i16_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: and w8, w8, #0xffff
-; CHECK-NEXT: cmp w8, w0, uxth
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ulecmp_i16_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sxtb w8, w0
+; CHECK-SD-NEXT: and w8, w8, #0xffff
+; CHECK-SD-NEXT: cmp w8, w0, uxth
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ulecmp_i16_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #128
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, #255
+; CHECK-GI-NEXT: cset w0, ls
+; CHECK-GI-NEXT: ret
%tmp0 = add i16 %x, 128 ; 1U << (8-1)
%tmp1 = icmp ule i16 %tmp0, 255 ; (1U << 8) - 1
ret i1 %tmp1
@@ -284,12 +410,20 @@ define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
; Adding not a constant
define i1 @add_ultcmp_bad_i16_i8_add(i16 %x, i16 %y) nounwind {
-; CHECK-LABEL: add_ultcmp_bad_i16_i8_add:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, w1
-; CHECK-NEXT: tst w8, #0xff00
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_bad_i16_i8_add:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0xff00
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_bad_i16_i8_add:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, w1
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%tmp0 = add i16 %x, %y
%tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
ret i1 %tmp1
@@ -311,12 +445,20 @@ define i1 @add_ultcmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
; Second constant is not larger than the first one
define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_bad_i8_i16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: and w8, w0, #0xffff
-; CHECK-NEXT: add w8, w8, #128
-; CHECK-NEXT: lsr w0, w8, #16
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_bad_i8_i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: add w8, w8, #128
+; CHECK-SD-NEXT: lsr w0, w8, #16
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_bad_i8_i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w0, #0xffff
+; CHECK-GI-NEXT: add w8, w8, #128
+; CHECK-GI-NEXT: cmp w8, w8, uxth
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
%tmp0 = add i16 %x, 128 ; 1U << (8-1)
%tmp1 = icmp ult i16 %tmp0, 128 ; 1U << (8-1)
ret i1 %tmp1
@@ -324,12 +466,20 @@ define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
; First constant is not power of two
define i1 @add_ultcmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #192
-; CHECK-NEXT: tst w8, #0xff00
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, #192
+; CHECK-SD-NEXT: tst w8, #0xff00
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #192
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
%tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
ret i1 %tmp1
@@ -351,12 +501,20 @@ define i1 @add_ultcmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
; Magic check fails, 64 << 1 != 256
define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_bad_i16_i8_magic:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #64
-; CHECK-NEXT: tst w8, #0xff00
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_bad_i16_i8_magic:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, #64
+; CHECK-SD-NEXT: tst w8, #0xff00
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_bad_i16_i8_magic:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #64
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
%tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
ret i1 %tmp1
@@ -364,12 +522,20 @@ define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
; Bad 'destination type'
define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_bad_i16_i4:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #8
-; CHECK-NEXT: tst w8, #0xfff0
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_bad_i16_i4:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, #8
+; CHECK-SD-NEXT: tst w8, #0xfff0
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_bad_i16_i4:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #8
+; CHECK-GI-NEXT: and w8, w8, #0xffff
+; CHECK-GI-NEXT: cmp w8, #16
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%tmp0 = add i16 %x, 8 ; 1U << (4-1)
%tmp1 = icmp ult i16 %tmp0, 16 ; 1U << 4
ret i1 %tmp1
@@ -377,12 +543,20 @@ define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
; Bad storage type
define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind {
-; CHECK-LABEL: add_ultcmp_bad_i24_i8:
-; CHECK: // %bb.0:
-; CHECK-NEXT: add w8, w0, #128
-; CHECK-NEXT: tst w8, #0xffff00
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: add_ultcmp_bad_i24_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, #128
+; CHECK-SD-NEXT: tst w8, #0xffff00
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: add_ultcmp_bad_i24_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, #128
+; CHECK-GI-NEXT: and w8, w8, #0xffffff
+; CHECK-GI-NEXT: cmp w8, #256
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%tmp0 = add i24 %x, 128 ; 1U << (8-1)
%tmp1 = icmp ult i24 %tmp0, 256 ; 1U << 8
ret i1 %tmp1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
new file mode 100644
index 0000000..e117200
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
@@ -0,0 +1,612 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=hawaii < %s | FileCheck -check-prefix=GFX7 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=fiji < %s | FileCheck -check-prefix=GFX8 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX12 %s
+
+define i16 @s_add_i16(i16 inreg %a, i16 inreg %b) {
+; GFX7-LABEL: s_add_i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_add_i32 s16, s16, s17
+; GFX7-NEXT: v_mov_b32_e32 v0, s16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_add_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_add_i32 s16, s16, s17
+; GFX9-NEXT: v_mov_b32_e32 v0, s16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_add_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_add_i32 s16, s16, s17
+; GFX8-NEXT: v_mov_b32_e32 v0, s16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_add_i32 s16, s16, s17
+; GFX10-NEXT: v_mov_b32_e32 v0, s16
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_add_i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_add_i32 s0, s0, s1
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_add_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_add_co_i32 s0, s0, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add i16 %a, %b
+ ret i16 %c
+}
+
+define i16 @v_add_i16(i16 %a, i16 %b) {
+; GFX7-LABEL: v_add_i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_add_i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_nc_u16 v0.l, v0.l, v1.l
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_add_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_nc_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add i16 %a, %b
+ ret i16 %c
+}
+
+define i32 @s_add_i32(i32 inreg %a, i32 inreg %b) {
+; GFX7-LABEL: s_add_i32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_add_i32 s16, s16, s17
+; GFX7-NEXT: v_mov_b32_e32 v0, s16
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_add_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_add_i32 s16, s16, s17
+; GFX9-NEXT: v_mov_b32_e32 v0, s16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_add_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_add_i32 s16, s16, s17
+; GFX8-NEXT: v_mov_b32_e32 v0, s16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_add_i32 s16, s16, s17
+; GFX10-NEXT: v_mov_b32_e32 v0, s16
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_add_i32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_add_i32 s0, s0, s1
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_add_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_add_co_i32 s0, s0, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add i32 %a, %b
+ ret i32 %c
+}
+
+define i32 @v_add_i32(i32 %a, i32 %b) {
+; GFX7-LABEL: v_add_i32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_add_i32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_add_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add i32 %a, %b
+ ret i32 %c
+}
+
+define <2 x i16> @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+; GFX7-LABEL: s_add_v2i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_add_i32 s16, s16, s18
+; GFX7-NEXT: s_add_i32 s17, s17, s19
+; GFX7-NEXT: v_mov_b32_e32 v0, s16
+; GFX7-NEXT: v_mov_b32_e32 v1, s17
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_add_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_lshr_b32 s4, s16, 16
+; GFX9-NEXT: s_lshr_b32 s5, s17, 16
+; GFX9-NEXT: s_add_i32 s16, s16, s17
+; GFX9-NEXT: s_add_i32 s4, s4, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s16, s4
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_add_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_lshr_b32 s4, s16, 16
+; GFX8-NEXT: s_lshr_b32 s5, s17, 16
+; GFX8-NEXT: s_add_i32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s16, s16, s17
+; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
+; GFX8-NEXT: s_and_b32 s5, 0xffff, s16
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s4, s5, s4
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_lshr_b32 s4, s16, 16
+; GFX10-NEXT: s_lshr_b32 s5, s17, 16
+; GFX10-NEXT: s_add_i32 s16, s16, s17
+; GFX10-NEXT: s_add_i32 s4, s4, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s16, s4
+; GFX10-NEXT: v_mov_b32_e32 v0, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_add_v2i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_lshr_b32 s2, s0, 16
+; GFX11-NEXT: s_lshr_b32 s3, s1, 16
+; GFX11-NEXT: s_add_i32 s0, s0, s1
+; GFX11-NEXT: s_add_i32 s2, s2, s3
+; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_add_v2i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_lshr_b32 s2, s0, 16
+; GFX12-NEXT: s_lshr_b32 s3, s1, 16
+; GFX12-NEXT: s_add_co_i32 s0, s0, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_i32 s2, s2, s3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add <2 x i16> %a, %b
+ ret <2 x i16> %c
+}
+
+define <2 x i16> @v_add_v2i16(<2 x i16> %a, <2 x i16> %b) {
+; GFX7-LABEL: v_add_v2i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u16_e32 v2, v0, v1
+; GFX8-NEXT: v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_add_v2i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_add_v2i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add <2 x i16> %a, %b
+ ret <2 x i16> %c
+}
+
+define i64 @s_add_i64(i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_add_i64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_add_u32 s4, s16, s18
+; GFX7-NEXT: s_addc_u32 s5, s17, s19
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_add_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_add_u32 s4, s16, s18
+; GFX9-NEXT: s_addc_u32 s5, s17, s19
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_add_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_add_u32 s4, s16, s18
+; GFX8-NEXT: s_addc_u32 s5, s17, s19
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: v_mov_b32_e32 v1, s5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_add_u32 s4, s16, s18
+; GFX10-NEXT: s_addc_u32 s5, s17, s19
+; GFX10-NEXT: v_mov_b32_e32 v0, s4
+; GFX10-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_add_i64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_add_u32 s0, s0, s2
+; GFX11-NEXT: s_addc_u32 s1, s1, s3
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_add_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add i64 %a, %b
+ ret i64 %c
+}
+
+define i64 @v_add_i64(i64 %a, i64 %b) {
+; GFX7-LABEL: v_add_i64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_add_i64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_add_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = add i64 %a, %b
+ ret i64 %c
+}
+
+define void @s_uaddo_uadde(i64 inreg %a, i64 inreg %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) {
+; GFX7-LABEL: s_uaddo_uadde:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_add_u32 s4, s16, s18
+; GFX7-NEXT: s_addc_u32 s5, s17, s19
+; GFX7-NEXT: v_mov_b32_e32 v4, s4
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_cselect_b32 s8, 1, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, s5
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: v_mov_b32_e32 v0, s8
+; GFX7-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_uaddo_uadde:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_add_u32 s4, s16, s18
+; GFX9-NEXT: s_addc_u32 s5, s17, s19
+; GFX9-NEXT: v_mov_b32_e32 v4, s4
+; GFX9-NEXT: s_cselect_b32 s6, 1, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[4:5], off
+; GFX9-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-NEXT: global_store_dword v[2:3], v0, off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_uaddo_uadde:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_add_u32 s4, s16, s18
+; GFX8-NEXT: s_addc_u32 s5, s17, s19
+; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: s_cselect_b32 s6, 1, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, s5
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: flat_store_dword v[2:3], v0
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_uaddo_uadde:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_add_u32 s4, s16, s18
+; GFX10-NEXT: s_addc_u32 s5, s17, s19
+; GFX10-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v4, s4
+; GFX10-NEXT: v_mov_b32_e32 v5, s5
+; GFX10-NEXT: v_mov_b32_e32 v6, s6
+; GFX10-NEXT: global_store_dwordx2 v[0:1], v[4:5], off
+; GFX10-NEXT: global_store_dword v[2:3], v6, off
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_uaddo_uadde:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_add_u32 s0, s0, s2
+; GFX11-NEXT: s_addc_u32 s1, s1, s3
+; GFX11-NEXT: s_cselect_b32 s2, 1, 0
+; GFX11-NEXT: v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0
+; GFX11-NEXT: v_mov_b32_e32 v6, s2
+; GFX11-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX11-NEXT: global_store_b32 v[2:3], v6, off
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_uaddo_uadde:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_add_co_u32 s0, s0, s2
+; GFX12-NEXT: s_add_co_ci_u32 s1, s1, s3
+; GFX12-NEXT: s_cselect_b32 s2, 1, 0
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0
+; GFX12-NEXT: v_mov_b32_e32 v6, s2
+; GFX12-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX12-NEXT: global_store_b32 v[2:3], v6, off
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %uaddo = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+ %add = extractvalue {i64, i1} %uaddo, 0
+ %of = extractvalue {i64, i1} %uaddo, 1
+ %of32 = select i1 %of, i32 1, i32 0
+ store i64 %add, ptr addrspace(1) %res
+ store i32 %of32, ptr addrspace(1) %carry
+ ret void
+}
+
+define void @v_uaddo_uadde(i64 %a, i64 %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) {
+; GFX7-LABEL: v_uaddo_uadde:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX7-NEXT: buffer_store_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT: buffer_store_dword v2, v[6:7], s[4:7], 0 addr64
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddo_uadde:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: global_store_dwordx2 v[4:5], v[0:1], off
+; GFX9-NEXT: global_store_dword v[6:7], v2, off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddo_uadde:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
+; GFX8-NEXT: flat_store_dword v[6:7], v2
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddo_uadde:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10-NEXT: global_store_dwordx2 v[4:5], v[0:1], off
+; GFX10-NEXT: global_store_dword v[6:7], v2, off
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_uaddo_uadde:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX11-NEXT: global_store_b64 v[4:5], v[0:1], off
+; GFX11-NEXT: global_store_b32 v[6:7], v2, off
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_uaddo_uadde:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX12-NEXT: global_store_b64 v[4:5], v[0:1], off
+; GFX12-NEXT: global_store_b32 v[6:7], v2, off
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %uaddo = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+ %add = extractvalue {i64, i1} %uaddo, 0
+ %of = extractvalue {i64, i1} %uaddo, 1
+ %of32 = select i1 %of, i32 1, i32 0
+ store i64 %add, ptr addrspace(1) %res
+ store i32 %of32, ptr addrspace(1) %carry
+ ret void
+}
+
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.ll
new file mode 100644
index 0000000..1a7ccf0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+
+define amdgpu_kernel void @fcmp_uniform_select(float %a, i32 %b, i32 %c, ptr addrspace(1) %out) {
+; GFX7-LABEL: fcmp_uniform_select:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x9
+; GFX7-NEXT: s_load_dword s3, s[4:5], 0xb
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_f32_e64 s[4:5], s6, 0
+; GFX7-NEXT: s_or_b64 s[4:5], s[4:5], s[4:5]
+; GFX7-NEXT: s_cselect_b32 s4, 1, 0
+; GFX7-NEXT: s_and_b32 s4, s4, 1
+; GFX7-NEXT: s_cmp_lg_u32 s4, 0
+; GFX7-NEXT: s_cselect_b32 s3, s7, s3
+; GFX7-NEXT: v_mov_b32_e32 v0, s3
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: fcmp_uniform_select:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX8-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x34
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_cmp_eq_f32_e64 s[4:5], s0, 0
+; GFX8-NEXT: s_cmp_lg_u64 s[4:5], 0
+; GFX8-NEXT: s_cselect_b32 s0, 1, 0
+; GFX8-NEXT: s_and_b32 s0, s0, 1
+; GFX8-NEXT: s_cmp_lg_u32 s0, 0
+; GFX8-NEXT: s_cselect_b32 s0, s1, s6
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dword v[0:1], v2
+; GFX8-NEXT: s_endpgm
+;
+; GFX11-LABEL: fcmp_uniform_select:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_clause 0x2
+; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x2c
+; GFX11-NEXT: s_load_b64 s[2:3], s[4:5], 0x34
+; GFX11-NEXT: v_mov_b32_e32 v1, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_f32_e64 s0, s0, 0
+; GFX11-NEXT: s_cmp_lg_u32 s0, 0
+; GFX11-NEXT: s_cselect_b32 s0, 1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_b32 s0, s0, 1
+; GFX11-NEXT: s_cmp_lg_u32 s0, 0
+; GFX11-NEXT: s_cselect_b32 s0, s1, s6
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: global_store_b32 v1, v0, s[2:3]
+; GFX11-NEXT: s_endpgm
+ %cmp = fcmp oeq float %a, 0.0
+ %sel = select i1 %cmp, i32 %b, i32 %c
+ store i32 %sel, ptr addrspace(1) %out
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.mir
new file mode 100644
index 0000000..67cc016
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.mir
@@ -0,0 +1,37 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn -mcpu=gfx700 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck -check-prefixes=GFX7 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx803 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck -check-prefixes=GF8 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck -check-prefixes=GFX11 %s
+
+---
+name: test_copy_scc_vcc
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; GFX7-LABEL: name: test_copy_scc_vcc
+ ; GFX7: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GFX7-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[DEF]], [[DEF]], implicit-def $scc
+ ; GFX7-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $scc
+ ; GFX7-NEXT: $sgpr0 = COPY [[COPY]]
+ ; GFX7-NEXT: S_ENDPGM 0, implicit $sgpr0
+ ;
+ ; GF8-LABEL: name: test_copy_scc_vcc
+ ; GF8: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; GF8-NEXT: S_CMP_LG_U64 [[DEF]], 0, implicit-def $scc
+ ; GF8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $scc
+ ; GF8-NEXT: $sgpr0 = COPY [[COPY]]
+ ; GF8-NEXT: S_ENDPGM 0, implicit $sgpr0
+ ;
+ ; GFX11-LABEL: name: test_copy_scc_vcc
+ ; GFX11: [[DEF:%[0-9]+]]:sreg_32_xm0_xexec = IMPLICIT_DEF
+ ; GFX11-NEXT: S_CMP_LG_U32 [[DEF]], 0, implicit-def $scc
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $scc
+ ; GFX11-NEXT: $sgpr0 = COPY [[COPY]]
+ ; GFX11-NEXT: S_ENDPGM 0, implicit $sgpr0
+ %0:vcc(s1) = G_IMPLICIT_DEF
+ %1:sgpr(s32) = G_AMDGPU_COPY_SCC_VCC %0
+ $sgpr0 = COPY %1
+ S_ENDPGM 0, implicit $sgpr0
+...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
index 7714c03..d3e2118 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
@@ -113,9 +113,9 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_and_b32 s0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
-; CHECK-NEXT: s_cmp_eq_u32 s0, 0
+; CHECK-NEXT: s_xor_b32 s0, s0, 1
+; CHECK-NEXT: s_and_b32 s0, s0, 1
+; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB8_2
; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
@@ -161,16 +161,17 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_and_b32 s0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
+; CHECK-NEXT: s_xor_b32 s0, s0, 1
+; CHECK-NEXT: s_xor_b32 s0, s0, 1
+; CHECK-NEXT: s_and_b32 s0, s0, 1
; CHECK-NEXT: s_cmp_lg_u32 s0, 0
-; CHECK-NEXT: s_cbranch_scc0 .LBB10_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB10_3
-; CHECK-NEXT: .LBB10_2: ; %true
+; CHECK-NEXT: s_cbranch_scc1 .LBB10_2
+; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB10_3
+; CHECK-NEXT: .LBB10_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB10_3
; CHECK-NEXT: .LBB10_3:
%c = trunc i32 %v to i1
%ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
@@ -208,11 +209,7 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_cmp_lt_u32 s0, 12
-; CHECK-NEXT: s_cselect_b32 s0, 1, 0
-; CHECK-NEXT: s_and_b32 s0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
-; CHECK-NEXT: s_cmp_eq_u32 s0, 0
+; CHECK-NEXT: s_cmp_ge_u32 s0, 12
; CHECK-NEXT: s_cbranch_scc1 .LBB12_2
; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
@@ -258,17 +255,13 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_cmp_lt_u32 s0, 12
-; CHECK-NEXT: s_cselect_b32 s0, 1, 0
-; CHECK-NEXT: s_and_b32 s0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
-; CHECK-NEXT: s_cbranch_scc0 .LBB14_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB14_3
-; CHECK-NEXT: .LBB14_2: ; %true
+; CHECK-NEXT: s_cbranch_scc1 .LBB14_2
+; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB14_3
+; CHECK-NEXT: .LBB14_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB14_3
; CHECK-NEXT: .LBB14_3:
%c = icmp ult i32 %v, 12
%ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
@@ -310,14 +303,12 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_and:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_cmp_lt_u32 s0, 12
+; CHECK-NEXT: s_cmp_ge_u32 s0, 12
; CHECK-NEXT: s_cselect_b32 s0, 1, 0
-; CHECK-NEXT: s_cmp_gt_u32 s1, 34
+; CHECK-NEXT: s_cmp_le_u32 s1, 34
; CHECK-NEXT: s_cselect_b32 s1, 1, 0
-; CHECK-NEXT: s_and_b32 s0, s0, s1
-; CHECK-NEXT: s_and_b32 s0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
-; CHECK-NEXT: s_cmp_eq_u32 s0, 0
+; CHECK-NEXT: s_or_b32 s0, s0, s1
+; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB16_2
; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
@@ -372,16 +363,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg
; CHECK-NEXT: s_cmp_gt_u32 s1, 34
; CHECK-NEXT: s_cselect_b32 s1, 1, 0
; CHECK-NEXT: s_and_b32 s0, s0, s1
-; CHECK-NEXT: s_and_b32 s0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
; CHECK-NEXT: s_cmp_lg_u32 s0, 0
-; CHECK-NEXT: s_cbranch_scc0 .LBB18_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB18_3
-; CHECK-NEXT: .LBB18_2: ; %true
+; CHECK-NEXT: s_cbranch_scc1 .LBB18_2
+; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB18_3
+; CHECK-NEXT: .LBB18_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB18_3
; CHECK-NEXT: .LBB18_3:
%v1c = icmp ult i32 %v1, 12
%v2c = icmp ugt i32 %v2, 34
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll
index 7b81669..250fbc7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll
@@ -116,9 +116,9 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_and_b32 s0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
-; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
+; CHECK-NEXT: s_xor_b32 s0, s0, 1
+; CHECK-NEXT: s_and_b32 s0, s0, 1
+; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB8_2
; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
@@ -164,16 +164,17 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_and_b32 s0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
-; CHECK-NEXT: s_cbranch_scc0 .LBB10_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB10_3
-; CHECK-NEXT: .LBB10_2: ; %true
+; CHECK-NEXT: s_xor_b32 s0, s0, 1
+; CHECK-NEXT: s_xor_b32 s0, s0, 1
+; CHECK-NEXT: s_and_b32 s0, s0, 1
+; CHECK-NEXT: s_cmp_lg_u32 s0, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB10_2
+; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB10_3
+; CHECK-NEXT: .LBB10_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB10_3
; CHECK-NEXT: .LBB10_3:
%c = trunc i32 %v to i1
%ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
@@ -211,11 +212,7 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_cmp_lt_u32 s0, 12
-; CHECK-NEXT: s_cselect_b32 s0, 1, 0
-; CHECK-NEXT: s_and_b32 s0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
-; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
+; CHECK-NEXT: s_cmp_ge_u32 s0, 12
; CHECK-NEXT: s_cbranch_scc1 .LBB12_2
; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
@@ -261,17 +258,13 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_cmp_lt_u32 s0, 12
-; CHECK-NEXT: s_cselect_b32 s0, 1, 0
-; CHECK-NEXT: s_and_b32 s0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
-; CHECK-NEXT: s_cbranch_scc0 .LBB14_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB14_3
-; CHECK-NEXT: .LBB14_2: ; %true
+; CHECK-NEXT: s_cbranch_scc1 .LBB14_2
+; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB14_3
+; CHECK-NEXT: .LBB14_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB14_3
; CHECK-NEXT: .LBB14_3:
%c = icmp ult i32 %v, 12
%ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
@@ -313,14 +306,12 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_and:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_cmp_lt_u32 s0, 12
+; CHECK-NEXT: s_cmp_ge_u32 s0, 12
; CHECK-NEXT: s_cselect_b32 s0, 1, 0
-; CHECK-NEXT: s_cmp_gt_u32 s1, 34
+; CHECK-NEXT: s_cmp_le_u32 s1, 34
; CHECK-NEXT: s_cselect_b32 s1, 1, 0
-; CHECK-NEXT: s_and_b32 s0, s0, s1
-; CHECK-NEXT: s_and_b32 s0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
-; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
+; CHECK-NEXT: s_or_b32 s0, s0, s1
+; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB16_2
; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
@@ -375,16 +366,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg
; CHECK-NEXT: s_cmp_gt_u32 s1, 34
; CHECK-NEXT: s_cselect_b32 s1, 1, 0
; CHECK-NEXT: s_and_b32 s0, s0, s1
-; CHECK-NEXT: s_and_b32 s0, 1, s0
-; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
-; CHECK-NEXT: s_cbranch_scc0 .LBB18_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB18_3
-; CHECK-NEXT: .LBB18_2: ; %true
+; CHECK-NEXT: s_cmp_lg_u32 s0, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB18_2
+; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB18_3
+; CHECK-NEXT: .LBB18_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB18_3
; CHECK-NEXT: .LBB18_3:
%v1c = icmp ult i32 %v1, 12
%v2c = icmp ugt i32 %v2, 34
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.mir
new file mode 100644
index 0000000..097372a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.mir
@@ -0,0 +1,524 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - | FileCheck %s
+---
+name: add_s16_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1
+ ; CHECK-LABEL: name: add_s16_ss
+ ; CHECK: liveins: $sgpr0, $sgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC]](s16)
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC1]](s16)
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[ANYEXT]], [[ANYEXT1]]
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[ADD]](s32)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC2]], [[TRUNC2]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s16) = G_TRUNC %0
+ %3:_(s16) = G_TRUNC %1
+ %4:_(s16) = G_ADD %2, %3
+ %5:_(s16) = G_AND %4, %4
+...
+
+---
+name: add_s16_sv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $vgpr0
+ ; CHECK-LABEL: name: add_s16_sv
+ ; CHECK: liveins: $sgpr0, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC]](s16)
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[COPY2]], [[TRUNC1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $vgpr0
+ %2:_(s16) = G_TRUNC %0
+ %3:_(s16) = G_TRUNC %1
+ %4:_(s16) = G_ADD %2, %3
+ %5:_(s16) = G_AND %4, %4
+...
+
+---
+name: add_s16_vs
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $vgpr0
+ ; CHECK-LABEL: name: add_s16_vs
+ ; CHECK: liveins: $sgpr0, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC1]](s16)
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[TRUNC]], [[COPY2]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $sgpr0
+ %2:_(s16) = G_TRUNC %0
+ %3:_(s16) = G_TRUNC %1
+ %4:_(s16) = G_ADD %2, %3
+ %5:_(s16) = G_AND %4, %4
+...
+
+---
+name: add_s16_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; CHECK-LABEL: name: add_s16_vv
+ ; CHECK: liveins: $vgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s16) = G_TRUNC %0
+ %3:_(s16) = G_TRUNC %1
+ %4:_(s16) = G_ADD %2, %3
+ %5:_(s16) = G_AND %4, %4
+...
+
+---
+name: add_s32_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1
+ ; CHECK-LABEL: name: add_s32_ss
+ ; CHECK: liveins: $sgpr0, $sgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[ADD]], [[ADD]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32) = G_ADD %0, %1
+ %3:_(s32) = G_AND %2, %2
+...
+
+---
+name: add_s32_sv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $vgpr0
+ ; CHECK-LABEL: name: add_s32_sv
+ ; CHECK: liveins: $sgpr0, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY2]], [[COPY1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[ADD]], [[ADD]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $vgpr0
+ %2:_(s32) = G_ADD %0, %1
+ %3:_(s32) = G_AND %2, %2
+...
+
+---
+name: add_s32_vs
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $vgpr0
+ ; CHECK-LABEL: name: add_s32_vs
+ ; CHECK: liveins: $sgpr0, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[ADD]], [[ADD]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $sgpr0
+ %2:_(s32) = G_ADD %0, %1
+ %3:_(s32) = G_AND %2, %2
+...
+
+---
+name: add_s32_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; CHECK-LABEL: name: add_s32_vv
+ ; CHECK: liveins: $vgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[ADD]], [[ADD]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = G_ADD %0, %1
+ %3:_(s32) = G_AND %2, %2
+...
+
+---
+name: add_s64_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+ ; CHECK-LABEL: name: add_s64_ss
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr2_sgpr3
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s64) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 255
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s64) = G_AND [[ADD]], [[ADD]]
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = COPY $sgpr2_sgpr3
+ %2:_(s64) = G_ADD %0, %1
+ %3:_(s64) = G_CONSTANT i64 255
+ %4:_(s64) = G_AND %2, %2
+...
+
+---
+name: add_s64_sv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $vgpr0_vgpr1
+ ; CHECK-LABEL: name: add_s64_sv
+ ; CHECK: liveins: $sgpr0_sgpr1, $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY]](s64)
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s64) = G_ADD [[COPY2]], [[COPY1]]
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]]
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = COPY $vgpr0_vgpr1
+ %2:_(s64) = G_ADD %0, %1
+ %3:_(s64) = G_AND %2, %2
+...
+
+---
+name: add_s64_vs
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $vgpr0_vgpr1
+ ; CHECK-LABEL: name: add_s64_vs
+ ; CHECK: liveins: $sgpr0_sgpr1, $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s64) = G_ADD [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]]
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ %0:_(s64) = COPY $vgpr0_vgpr1
+ %1:_(s64) = COPY $sgpr0_sgpr1
+ %2:_(s64) = G_ADD %0, %1
+ %3:_(s64) = G_AND %2, %2
+...
+
+---
+name: add_s64_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; CHECK-LABEL: name: add_s64_vv
+ ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s64) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]]
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ %0:_(s64) = COPY $vgpr0_vgpr1
+ %1:_(s64) = COPY $vgpr2_vgpr3
+ %2:_(s64) = G_ADD %0, %1
+ %3:_(s64) = G_AND %2, %2
+...
+
+---
+name: uaddo_s32_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1
+ ; CHECK-LABEL: name: uaddo_s32_ss
+ ; CHECK: liveins: $sgpr0, $sgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[UADDO:%[0-9]+]]:sgpr(s32), [[UADDO1:%[0-9]+]]:sgpr(s32) = G_UADDO [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[UADDO1]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s32) = G_AND [[SELECT]], [[UADDO]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32), %3:_(s1) = G_UADDO %0, %1
+ %4:_(s32) = G_ZEXT %3
+ %5:_(s32) = G_AND %4, %2
+...
+
+---
+name: uaddo_s32_sv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $vgpr1
+ ; CHECK-LABEL: name: uaddo_s32_sv
+ ; CHECK: liveins: $sgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+ ; CHECK-NEXT: [[UADDO:%[0-9]+]]:vgpr(s32), [[UADDO1:%[0-9]+]]:vcc(s1) = G_UADDO [[COPY2]], [[COPY1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDO1]](s1), [[C]], [[C1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDO]], [[SELECT]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32), %3:_(s1) = G_UADDO %0, %1
+ %4:_(s32) = G_ZEXT %3
+ %5:_(s32) = G_AND %2, %4
+...
+
+---
+name: uaddo_s32_vs
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $sgpr1
+ ; CHECK-LABEL: name: uaddo_s32_vs
+ ; CHECK: liveins: $vgpr0, $sgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: [[UADDO:%[0-9]+]]:vgpr(s32), [[UADDO1:%[0-9]+]]:vcc(s1) = G_UADDO [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDO1]](s1), [[C]], [[C1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDO]], [[SELECT]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32), %3:_(s1) = G_UADDO %0, %1
+ %4:_(s32) = G_ZEXT %3
+ %5:_(s32) = G_AND %2, %4
+...
+
+---
+name: uaddo_s32_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; CHECK-LABEL: name: uaddo_s32_vv
+ ; CHECK: liveins: $vgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+ ; CHECK-NEXT: [[UADDO:%[0-9]+]]:vgpr(s32), [[UADDO1:%[0-9]+]]:vcc(s1) = G_UADDO [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDO1]](s1), [[C]], [[C1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDO]], [[SELECT]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32), %3:_(s1) = G_UADDO %0, %1
+ %4:_(s32) = G_ZEXT %3
+ %5:_(s32) = G_AND %2, %4
+...
+
+---
+name: uadde_s32_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-LABEL: name: uadde_s32_ss
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY2]], [[C]]
+ ; CHECK-NEXT: [[UADDE:%[0-9]+]]:sgpr(s32), [[UADDE1:%[0-9]+]]:sgpr(s32) = G_UADDE [[COPY]], [[COPY1]], [[AND]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s32) = G_AND [[UADDE1]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND1]](s32), [[C]], [[C1]]
+ ; CHECK-NEXT: [[AND2:%[0-9]+]]:sgpr(s32) = G_AND [[UADDE]], [[SELECT]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32) = COPY $sgpr2
+ %3:_(s1) = G_TRUNC %2
+ %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3
+ %6:_(s32) = G_ZEXT %5
+ %7:_(s32) = G_AND %4, %6
+...
+
+---
+name: uadde_s32_sv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $vgpr1, $sgpr2
+ ; CHECK-LABEL: name: uadde_s32_sv
+ ; CHECK: liveins: $sgpr0, $vgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+ ; CHECK-NEXT: [[AMDGPU_COPY_VCC_SCC:%[0-9]+]]:vcc(s1) = G_AMDGPU_COPY_VCC_SCC [[COPY2]](s32)
+ ; CHECK-NEXT: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY3]], [[COPY1]], [[AMDGPU_COPY_VCC_SCC]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDE1]](s1), [[C]], [[C1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDE]], [[SELECT]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = COPY $sgpr2
+ %3:_(s1) = G_TRUNC %2
+ %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3
+ %6:_(s32) = G_ZEXT %5
+ %7:_(s32) = G_AND %4, %6
+...
+
+---
+name: uadde_s32_vs
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $sgpr1, $sgpr2
+ ; CHECK-LABEL: name: uadde_s32_vs
+ ; CHECK: liveins: $vgpr0, $sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: [[AMDGPU_COPY_VCC_SCC:%[0-9]+]]:vcc(s1) = G_AMDGPU_COPY_VCC_SCC [[COPY2]](s32)
+ ; CHECK-NEXT: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY]], [[COPY3]], [[AMDGPU_COPY_VCC_SCC]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDE1]](s1), [[C]], [[C1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDE]], [[SELECT]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32) = COPY $sgpr2
+ %3:_(s1) = G_TRUNC %2
+ %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3
+ %6:_(s32) = G_ZEXT %5
+ %7:_(s32) = G_AND %4, %6
+...
+
+---
+name: uadde_s32_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: uadde_s32_vv
+ ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
+ ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[COPY2]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[AND]](s32), [[C1]]
+ ; CHECK-NEXT: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY]], [[COPY1]], [[ICMP]]
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDE1]](s1), [[C]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UADDE]], [[SELECT]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = COPY $vgpr2
+ %3:_(s1) = G_TRUNC %2
+ %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3
+ %6:_(s32) = G_ZEXT %5
+ %7:_(s32) = G_AND %4, %6
+...
+
+---
+name: uadde_s32_ss_scc_use
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-LABEL: name: uadde_s32_ss_scc_use
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY2]], [[C]]
+ ; CHECK-NEXT: [[UADDE:%[0-9]+]]:sgpr(s32), [[UADDE1:%[0-9]+]]:sgpr(s32) = G_UADDE [[COPY]], [[COPY1]], [[AND]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s32) = G_AND [[UADDE1]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND1]](s32), [[C]], [[C1]]
+ ; CHECK-NEXT: [[AND2:%[0-9]+]]:sgpr(s32) = G_AND [[UADDE]], [[SELECT]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32) = COPY $sgpr2
+ %3:_(s1) = G_TRUNC %2
+ %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3
+ %6:_(s32) = G_ZEXT %5
+ %8:_(s32) = G_AND %4, %6
+...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir
index 54ee69f..30c958f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir
@@ -1,6 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
-# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - | FileCheck %s
---
name: add_s16_ss
legalized: true
@@ -19,13 +18,13 @@ body: |
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC1]](s16)
; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[ANYEXT]], [[ANYEXT1]]
; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[ADD]](s32)
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[TRUNC2]](s16)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC2]], [[TRUNC2]]
%0:_(s32) = COPY $sgpr0
%1:_(s32) = COPY $sgpr1
%2:_(s16) = G_TRUNC %0
%3:_(s16) = G_TRUNC %1
%4:_(s16) = G_ADD %2, %3
- S_ENDPGM 0, implicit %4
+ %5:_(s16) = G_AND %4, %4
...
---
@@ -44,13 +43,13 @@ body: |
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC]](s16)
; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[COPY2]], [[TRUNC1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](s16)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]]
%0:_(s32) = COPY $sgpr0
%1:_(s32) = COPY $vgpr0
%2:_(s16) = G_TRUNC %0
%3:_(s16) = G_TRUNC %1
%4:_(s16) = G_ADD %2, %3
- S_ENDPGM 0, implicit %4
+ %5:_(s16) = G_AND %4, %4
...
---
@@ -69,13 +68,13 @@ body: |
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC1]](s16)
; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[TRUNC]], [[COPY2]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](s16)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]]
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $sgpr0
%2:_(s16) = G_TRUNC %0
%3:_(s16) = G_TRUNC %1
%4:_(s16) = G_ADD %2, %3
- S_ENDPGM 0, implicit %4
+ %5:_(s16) = G_AND %4, %4
...
---
@@ -93,11 +92,11 @@ body: |
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[TRUNC]], [[TRUNC1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](s16)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]]
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
%3:_(s16) = G_TRUNC %1
%4:_(s16) = G_ADD %2, %3
- S_ENDPGM 0, implicit %4
+ %5:_(s16) = G_AND %4, %4
...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir
index 97018fa..01eb391 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir
@@ -1,6 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
-# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - | FileCheck %s
---
name: add_v2s16_ss
@@ -18,16 +17,19 @@ body: |
; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
; CHECK-NEXT: [[LSHR:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST]], [[C]](s32)
; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY1]](<2 x s16>)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
- ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C1]](s32)
+ ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[BITCAST]], [[BITCAST1]]
; CHECK-NEXT: [[ADD1:%[0-9]+]]:sgpr(s32) = G_ADD [[LSHR]], [[LSHR1]]
; CHECK-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ADD]](s32), [[ADD1]](s32)
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[BUILD_VECTOR_TRUNC]](<2 x s16>)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255
+ ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C1]](s16)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(<2 x s16>) = G_AND [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR]]
%0:_(<2 x s16>) = COPY $sgpr0
%1:_(<2 x s16>) = COPY $sgpr1
%2:_(<2 x s16>) = G_ADD %0, %1
- S_ENDPGM 0, implicit %2
+ %3:_(s16) = G_CONSTANT i16 255
+ %4:_(<2 x s16>) = G_BUILD_VECTOR %3, %3
+ %5:_(<2 x s16>) = G_AND %2, %4
...
---
@@ -44,11 +46,11 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY]](<2 x s16>)
; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(<2 x s16>) = G_ADD [[COPY2]], [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](<2 x s16>)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[ADD]], [[ADD]]
%0:_(<2 x s16>) = COPY $sgpr0
%1:_(<2 x s16>) = COPY $vgpr0
%2:_(<2 x s16>) = G_ADD %0, %1
- S_ENDPGM 0, implicit %2
+ %3:_(<2 x s16>) = G_AND %2, %2
...
---
@@ -65,9 +67,11 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY1]](<2 x s16>)
; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(<2 x s16>) = G_ADD [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[ADD]], [[ADD]]
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $sgpr0
%2:_(<2 x s16>) = G_ADD %0, %1
+ %3:_(<2 x s16>) = G_AND %2, %2
...
---
@@ -83,9 +87,9 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr1
; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(<2 x s16>) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](<2 x s16>)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[ADD]], [[ADD]]
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
%2:_(<2 x s16>) = G_ADD %0, %1
- S_ENDPGM 0, implicit %2
+ %3:_(<2 x s16>) = G_AND %2, %2
...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir
index 7378c93..e0e783e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir
@@ -77,10 +77,14 @@ body: |
; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C1]], [[C2]]
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SELECT]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC]], [[C3]]
%0:_(s32) = COPY $sgpr0
%1:_(s32) = COPY $sgpr1
%2:_(s1) = G_ICMP intpred(eq), %0, %1
%3:_(s16) = G_SEXT %2
+ %4:_(s16) = G_CONSTANT i16 255
+ %5:_(s16) = G_AND %3, %4
...
---
@@ -215,9 +219,13 @@ body: |
; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C1]], [[C2]]
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SELECT]](s32)
+ ; CHECK-NEXT: [[C3:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC]], [[C3]]
%0:_(s32) = COPY $sgpr0
%1:_(s1) = G_TRUNC %0
%2:_(s16) = G_SEXT %1
+ %3:_(s16) = G_CONSTANT i16 255
+ %4:_(s16) = G_AND %2, %3
...
---
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir
index b0199d3..e3c01c0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir
@@ -1,5 +1,107 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass="amdgpu-regbankselect,amdgpu-regbanklegalize" %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s
+
+---
+name: sub_s16_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1
+ ; CHECK-LABEL: name: sub_s16_ss
+ ; CHECK: liveins: $sgpr0, $sgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC]](s16)
+ ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC1]](s16)
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:sgpr(s32) = G_SUB [[ANYEXT]], [[ANYEXT1]]
+ ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SUB]](s32)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC2]], [[TRUNC2]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s16) = G_TRUNC %0
+ %3:_(s16) = G_TRUNC %1
+ %4:_(s16) = G_SUB %2, %3
+ %6:_(s16) = G_AND %4, %4
+...
+
+---
+name: sub_s16_sv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $vgpr0
+ ; CHECK-LABEL: name: sub_s16_sv
+ ; CHECK: liveins: $sgpr0, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC]](s16)
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s16) = G_SUB [[COPY2]], [[TRUNC1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[SUB]], [[SUB]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $vgpr0
+ %2:_(s16) = G_TRUNC %0
+ %3:_(s16) = G_TRUNC %1
+ %4:_(s16) = G_SUB %2, %3
+ %6:_(s16) = G_AND %4, %4
+...
+
+---
+name: sub_s16_vs
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $vgpr0
+ ; CHECK-LABEL: name: sub_s16_vs
+ ; CHECK: liveins: $sgpr0, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC1]](s16)
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s16) = G_SUB [[TRUNC]], [[COPY2]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[SUB]], [[SUB]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $sgpr0
+ %2:_(s16) = G_TRUNC %0
+ %3:_(s16) = G_TRUNC %1
+ %4:_(s16) = G_SUB %2, %3
+ %6:_(s16) = G_AND %4, %4
+...
+
+---
+name: sub_s16_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; CHECK-LABEL: name: sub_s16_vv
+ ; CHECK: liveins: $vgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s16) = G_SUB [[TRUNC]], [[TRUNC1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[SUB]], [[SUB]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s16) = G_TRUNC %0
+ %3:_(s16) = G_TRUNC %1
+ %4:_(s16) = G_SUB %2, %3
+ %6:_(s16) = G_AND %4, %4
+...
---
name: sub_s32_ss
@@ -14,9 +116,11 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
; CHECK-NEXT: [[SUB:%[0-9]+]]:sgpr(s32) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[SUB]], [[SUB]]
%0:_(s32) = COPY $sgpr0
%1:_(s32) = COPY $sgpr1
%2:_(s32) = G_SUB %0, %1
+ %4:_(s32) = G_AND %2, %2
...
---
@@ -33,9 +137,11 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[COPY2]], [[COPY1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[SUB]], [[SUB]]
%0:_(s32) = COPY $sgpr0
%1:_(s32) = COPY $vgpr0
%2:_(s32) = G_SUB %0, %1
+ %4:_(s32) = G_AND %2, %2
...
---
@@ -52,9 +158,11 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[SUB]], [[SUB]]
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $sgpr0
%2:_(s32) = G_SUB %0, %1
+ %4:_(s32) = G_AND %2, %2
...
---
@@ -70,7 +178,376 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[SUB]], [[SUB]]
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_SUB %0, %1
+ %4:_(s32) = G_AND %2, %2
+...
+
+---
+name: sub_v2s16_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1
+ ; CHECK-LABEL: name: sub_v2s16_ss
+ ; CHECK: liveins: $sgpr0, $sgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr1
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
+ ; CHECK-NEXT: [[LSHR:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:sgpr(s32) = G_SUB [[BITCAST]], [[BITCAST1]]
+ ; CHECK-NEXT: [[SUB1:%[0-9]+]]:sgpr(s32) = G_SUB [[LSHR]], [[LSHR1]]
+ ; CHECK-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[SUB]](s32), [[SUB1]](s32)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(<2 x s16>) = G_AND [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC]]
+ %0:_(<2 x s16>) = COPY $sgpr0
+ %1:_(<2 x s16>) = COPY $sgpr1
+ %2:_(<2 x s16>) = G_SUB %0, %1
+ %5:_(<2 x s16>) = G_AND %2, %2
+...
+
+---
+name: sub_v2s16_sv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $vgpr0
+ ; CHECK-LABEL: name: sub_v2s16_sv
+ ; CHECK: liveins: $sgpr0, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY]](<2 x s16>)
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(<2 x s16>) = G_SUB [[COPY2]], [[COPY1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[SUB]], [[SUB]]
+ %0:_(<2 x s16>) = COPY $sgpr0
+ %1:_(<2 x s16>) = COPY $vgpr0
+ %2:_(<2 x s16>) = G_SUB %0, %1
+ %5:_(<2 x s16>) = G_AND %2, %2
+...
+
+---
+name: sub_v2s16_vs
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $vgpr0
+ ; CHECK-LABEL: name: sub_v2s16_vs
+ ; CHECK: liveins: $sgpr0, $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY1]](<2 x s16>)
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(<2 x s16>) = G_SUB [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[SUB]], [[SUB]]
+ %0:_(<2 x s16>) = COPY $vgpr0
+ %1:_(<2 x s16>) = COPY $sgpr0
+ %2:_(<2 x s16>) = G_SUB %0, %1
+ %5:_(<2 x s16>) = G_AND %2, %2
+...
+
+---
+name: sub_v2s16_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; CHECK-LABEL: name: sub_v2s16_vv
+ ; CHECK: liveins: $vgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr1
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(<2 x s16>) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[SUB]], [[SUB]]
+ %0:_(<2 x s16>) = COPY $vgpr0
+ %1:_(<2 x s16>) = COPY $vgpr1
+ %2:_(<2 x s16>) = G_SUB %0, %1
+ %5:_(<2 x s16>) = G_AND %2, %2
+...
+
+---
+name: sub_s64_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr0_sgpr1
+ ; CHECK-LABEL: name: sub_s64_ss
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr0_sgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:sgpr(s64) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s64) = G_AND [[SUB]], [[SUB]]
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = COPY $sgpr0_sgpr1
+ %2:_(s64) = G_SUB %0, %1
+ %4:_(s64) = G_AND %2, %2
+...
+
+---
+name: sub_s64_sv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $vgpr0_vgpr1
+ ; CHECK-LABEL: name: sub_s64_sv
+ ; CHECK: liveins: $sgpr0_sgpr1, $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY]](s64)
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s64) = G_SUB [[COPY2]], [[COPY1]]
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]]
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = COPY $vgpr0_vgpr1
+ %2:_(s64) = G_SUB %0, %1
+ %4:_(s64) = G_AND %2, %2
+...
+
+---
+name: sub_s64_vs
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $vgpr0_vgpr1
+ ; CHECK-LABEL: name: sub_s64_vs
+ ; CHECK: liveins: $sgpr0_sgpr1, $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY1]](s64)
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s64) = G_SUB [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]]
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ %0:_(s64) = COPY $vgpr0_vgpr1
+ %1:_(s64) = COPY $sgpr0_sgpr1
+ %2:_(s64) = G_SUB %0, %1
+ %4:_(s64) = G_AND %2, %2
+...
+
+---
+name: sub_s64_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; CHECK-LABEL: name: sub_s64_vv
+ ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3
+ ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s64) = G_SUB [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64)
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]]
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32)
+ %0:_(s64) = COPY $vgpr0_vgpr1
+ %1:_(s64) = COPY $vgpr2_vgpr3
+ %2:_(s64) = G_SUB %0, %1
+ %4:_(s64) = G_AND %2, %2
+...
+
+---
+name: usubo_s32_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1
+ ; CHECK-LABEL: name: usubo_s32_ss
+ ; CHECK: liveins: $sgpr0, $sgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[USUBO:%[0-9]+]]:sgpr(s32), [[USUBO1:%[0-9]+]]:sgpr(s32) = G_USUBO [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[USUBO]], [[USUBO]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32), %3:_(s1) = G_USUBO %0, %1
+ %5:_(s32) = G_AND %2, %2
+...
+
+---
+name: usubo_s32_sv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $vgpr1
+ ; CHECK-LABEL: name: usubo_s32_sv
+ ; CHECK: liveins: $sgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+ ; CHECK-NEXT: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[COPY2]], [[COPY1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBO]], [[USUBO]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32), %3:_(s1) = G_USUBO %0, %1
+ %5:_(s32) = G_AND %2, %2
+...
+
+---
+name: usubo_s32_vs
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $sgpr1
+ ; CHECK-LABEL: name: usubo_s32_vs
+ ; CHECK: liveins: $vgpr0, $sgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBO]], [[USUBO]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32), %3:_(s1) = G_USUBO %0, %1
+ %5:_(s32) = G_AND %2, %2
+...
+
+---
+name: usubo_s32_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; CHECK-LABEL: name: usubo_s32_vv
+ ; CHECK: liveins: $vgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+ ; CHECK-NEXT: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBO]], [[USUBO]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32), %3:_(s1) = G_USUBO %0, %1
+ %5:_(s32) = G_AND %2, %2
+...
+
+---
+name: usube_s32_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-LABEL: name: usube_s32_ss
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY2]], [[C]]
+ ; CHECK-NEXT: [[USUBE:%[0-9]+]]:sgpr(s32), [[USUBE1:%[0-9]+]]:sgpr(s32) = G_USUBE [[COPY]], [[COPY1]], [[AND]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s32) = G_AND [[USUBE]], [[USUBE]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32) = COPY $sgpr2
+ %3:_(s1) = G_TRUNC %2
+ %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3
+ %7:_(s32) = G_AND %4, %4
+...
+
+---
+name: usube_s32_sv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0, $vgpr1, $sgpr2
+ ; CHECK-LABEL: name: usube_s32_sv
+ ; CHECK: liveins: $sgpr0, $vgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
+ ; CHECK-NEXT: [[AMDGPU_COPY_VCC_SCC:%[0-9]+]]:vcc(s1) = G_AMDGPU_COPY_VCC_SCC [[COPY2]](s32)
+ ; CHECK-NEXT: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY3]], [[COPY1]], [[AMDGPU_COPY_VCC_SCC]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBE]], [[USUBE]]
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = COPY $sgpr2
+ %3:_(s1) = G_TRUNC %2
+ %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3
+ %7:_(s32) = G_AND %4, %4
+...
+
+---
+name: usube_s32_vs
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $sgpr1, $sgpr2
+ ; CHECK-LABEL: name: usube_s32_vs
+ ; CHECK: liveins: $vgpr0, $sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: [[AMDGPU_COPY_VCC_SCC:%[0-9]+]]:vcc(s1) = G_AMDGPU_COPY_VCC_SCC [[COPY2]](s32)
+ ; CHECK-NEXT: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY]], [[COPY3]], [[AMDGPU_COPY_VCC_SCC]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBE]], [[USUBE]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32) = COPY $sgpr2
+ %3:_(s1) = G_TRUNC %2
+ %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3
+ %7:_(s32) = G_AND %4, %4
+...
+
+---
+name: usube_s32_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+ ; CHECK-LABEL: name: usube_s32_vv
+ ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
+ ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[COPY2]], [[C]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[AND]](s32), [[C1]]
+ ; CHECK-NEXT: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY]], [[COPY1]], [[ICMP]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[USUBE]], [[USUBE]]
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = COPY $vgpr2
+ %3:_(s1) = G_TRUNC %2
+ %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3
+ %7:_(s32) = G_AND %4, %4
...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir
index 088c20a3..d4baa5f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir
@@ -73,10 +73,14 @@ body: |
; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C]], [[C1]]
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SELECT]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC]], [[C2]]
%0:_(s32) = COPY $sgpr0
%1:_(s32) = COPY $sgpr1
%2:_(s1) = G_ICMP intpred(eq), %0, %1
%3:_(s16) = G_ZEXT %2
+ %4:_(s16) = G_CONSTANT i16 255
+ %5:_(s16) = G_AND %3, %4
...
---
@@ -209,9 +213,13 @@ body: |
; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C]], [[C1]]
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SELECT]](s32)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC]], [[C2]]
%0:_(s32) = COPY $sgpr0
%1:_(s1) = G_TRUNC %0
%2:_(s16) = G_ZEXT %1
+ %3:_(s16) = G_CONSTANT i16 255
+ %4:_(s16) = G_AND %2, %3
...
---
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.ll
new file mode 100644
index 0000000..8b5958d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.ll
@@ -0,0 +1,535 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=hawaii < %s | FileCheck -check-prefix=GFX7 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=fiji < %s | FileCheck -check-prefix=GFX8 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX12 %s
+
+define i16 @s_sub_i16(i16 inreg %a, i16 inreg %b) {
+; GFX7-LABEL: s_sub_i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_sub_i32 s4, s16, s17
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_sub_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_sub_i32 s4, s16, s17
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_sub_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_sub_i32 s4, s16, s17
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_sub_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_sub_i32 s4, s16, s17
+; GFX10-NEXT: v_mov_b32_e32 v0, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_sub_i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_sub_i32 s0, s0, s1
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_sub_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_sub_co_i32 s0, s0, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = sub i16 %a, %b
+ ret i16 %c
+}
+
+define i16 @v_sub_i16(i16 %a, i16 %b) {
+; GFX7-LABEL: v_sub_i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_sub_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_sub_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_sub_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_sub_nc_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_sub_i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_sub_nc_u16 v0.l, v0.l, v1.l
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_sub_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_sub_nc_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = sub i16 %a, %b
+ ret i16 %c
+}
+
+define i32 @s_sub_i32(i32 inreg %a, i32 inreg %b) {
+; GFX7-LABEL: s_sub_i32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_sub_i32 s4, s16, s17
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_sub_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_sub_i32 s4, s16, s17
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_sub_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_sub_i32 s4, s16, s17
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_sub_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_sub_i32 s4, s16, s17
+; GFX10-NEXT: v_mov_b32_e32 v0, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_sub_i32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_sub_i32 s0, s0, s1
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_sub_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_sub_co_i32 s0, s0, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = sub i32 %a, %b
+ ret i32 %c
+}
+
+define i32 @v_sub_i32(i32 %a, i32 %b) {
+; GFX7-LABEL: v_sub_i32:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_sub_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_sub_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_sub_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_sub_i32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_sub_nc_u32_e32 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_sub_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_sub_nc_u32_e32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = sub i32 %a, %b
+ ret i32 %c
+}
+
+; TODO: Add test for s_sub_v2i16. Instruction selector currently fails
+; to handle G_UNMERGE_VALUES.
+
+define <2 x i16> @v_sub_v2i16(<2 x i16> %a, <2 x i16> %b) {
+; GFX7-LABEL: v_sub_v2i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_sub_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_sub_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_sub_i16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_sub_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_sub_u16_e32 v2, v0, v1
+; GFX8-NEXT: v_sub_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_sub_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_sub_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_sub_v2i16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_sub_i16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_sub_v2i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_pk_sub_i16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = sub <2 x i16> %a, %b
+ ret <2 x i16> %c
+}
+
+define i64 @s_sub_i64(i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_sub_i64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_sub_u32 s4, s16, s18
+; GFX7-NEXT: s_subb_u32 s5, s17, s19
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_sub_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_sub_u32 s4, s16, s18
+; GFX9-NEXT: s_subb_u32 s5, s17, s19
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_sub_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_sub_u32 s4, s16, s18
+; GFX8-NEXT: s_subb_u32 s5, s17, s19
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: v_mov_b32_e32 v1, s5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_sub_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_sub_u32 s4, s16, s18
+; GFX10-NEXT: s_subb_u32 s5, s17, s19
+; GFX10-NEXT: v_mov_b32_e32 v0, s4
+; GFX10-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_sub_i64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_sub_u32 s0, s0, s2
+; GFX11-NEXT: s_subb_u32 s1, s1, s3
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_sub_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_sub_nc_u64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = sub i64 %a, %b
+ ret i64 %c
+}
+
+define i64 @v_sub_i64(i64 %a, i64 %b) {
+; GFX7-LABEL: v_sub_i64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_sub_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_sub_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_sub_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_sub_i64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_sub_co_ci_u32_e64 v1, null, v1, v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_sub_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_sub_co_ci_u32_e64 v1, null, v1, v3, vcc_lo
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %c = sub i64 %a, %b
+ ret i64 %c
+}
+
+define void @s_usubo_usube(i64 inreg %a, i64 inreg %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) {
+; GFX7-LABEL: s_usubo_usube:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: s_sub_u32 s4, s16, s18
+; GFX7-NEXT: s_subb_u32 s5, s17, s19
+; GFX7-NEXT: v_mov_b32_e32 v4, s4
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_cselect_b32 s8, 1, 0
+; GFX7-NEXT: v_mov_b32_e32 v5, s5
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: buffer_store_dwordx2 v[4:5], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT: v_mov_b32_e32 v0, s8
+; GFX7-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_usubo_usube:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_sub_u32 s4, s16, s18
+; GFX9-NEXT: s_subb_u32 s5, s17, s19
+; GFX9-NEXT: v_mov_b32_e32 v4, s4
+; GFX9-NEXT: s_cselect_b32 s6, 1, 0
+; GFX9-NEXT: v_mov_b32_e32 v5, s5
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[4:5], off
+; GFX9-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-NEXT: global_store_dword v[2:3], v0, off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_usubo_usube:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_sub_u32 s4, s16, s18
+; GFX8-NEXT: s_subb_u32 s5, s17, s19
+; GFX8-NEXT: v_mov_b32_e32 v4, s4
+; GFX8-NEXT: s_cselect_b32 s6, 1, 0
+; GFX8-NEXT: v_mov_b32_e32 v5, s5
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: flat_store_dword v[2:3], v0
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_usubo_usube:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_sub_u32 s4, s16, s18
+; GFX10-NEXT: s_subb_u32 s5, s17, s19
+; GFX10-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v4, s4
+; GFX10-NEXT: v_mov_b32_e32 v5, s5
+; GFX10-NEXT: v_mov_b32_e32 v6, s6
+; GFX10-NEXT: global_store_dwordx2 v[0:1], v[4:5], off
+; GFX10-NEXT: global_store_dword v[2:3], v6, off
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_usubo_usube:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_sub_u32 s0, s0, s2
+; GFX11-NEXT: s_subb_u32 s1, s1, s3
+; GFX11-NEXT: s_cselect_b32 s2, 1, 0
+; GFX11-NEXT: v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0
+; GFX11-NEXT: v_mov_b32_e32 v6, s2
+; GFX11-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX11-NEXT: global_store_b32 v[2:3], v6, off
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_usubo_usube:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_sub_co_u32 s0, s0, s2
+; GFX12-NEXT: s_sub_co_ci_u32 s1, s1, s3
+; GFX12-NEXT: s_cselect_b32 s2, 1, 0
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0
+; GFX12-NEXT: v_mov_b32_e32 v6, s2
+; GFX12-NEXT: global_store_b64 v[0:1], v[4:5], off
+; GFX12-NEXT: global_store_b32 v[2:3], v6, off
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %usubo = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+ %sub = extractvalue {i64, i1} %usubo, 0
+ %of = extractvalue {i64, i1} %usubo, 1
+ %of32 = select i1 %of, i32 1, i32 0
+ store i64 %sub, ptr addrspace(1) %res
+ store i32 %of32, ptr addrspace(1) %carry
+ ret void
+}
+
+define void @v_usubo_usube(i64 %a, i64 %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) {
+; GFX7-LABEL: v_usubo_usube:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT: s_mov_b32 s6, 0
+; GFX7-NEXT: s_mov_b32 s7, 0xf000
+; GFX7-NEXT: s_mov_b64 s[4:5], 0
+; GFX7-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX7-NEXT: buffer_store_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT: buffer_store_dword v2, v[6:7], s[4:7], 0 addr64
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubo_usube:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: global_store_dwordx2 v[4:5], v[0:1], off
+; GFX9-NEXT: global_store_dword v[6:7], v2, off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubo_usube:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
+; GFX8-NEXT: flat_store_dword v[6:7], v2
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubo_usube:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10-NEXT: global_store_dwordx2 v[4:5], v[0:1], off
+; GFX10-NEXT: global_store_dword v[6:7], v2, off
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_usubo_usube:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX11-NEXT: global_store_b64 v[4:5], v[0:1], off
+; GFX11-NEXT: global_store_b32 v[6:7], v2, off
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_usubo_usube:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX12-NEXT: s_wait_alu 0xfffd
+; GFX12-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX12-NEXT: global_store_b64 v[4:5], v[0:1], off
+; GFX12-NEXT: global_store_b32 v[6:7], v2, off
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %usubo = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+ %sub = extractvalue {i64, i1} %usubo, 0
+ %of = extractvalue {i64, i1} %usubo, 1
+ %of32 = select i1 %of, i32 1, i32 0
+ store i64 %sub, ptr addrspace(1) %res
+ store i32 %of32, ptr addrspace(1) %carry
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsic.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsic.ll
new file mode 100644
index 0000000..34d4c51
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsic.ll
@@ -0,0 +1,173 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -o - %s | FileCheck %s
+define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) {
+; CHECK-LABEL: readfirstlane_with_readfirstlane:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 5
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_endpgm
+ %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 5)
+ %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
+ store i32 %v2, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
+; CHECK-LABEL: readfirstlane_with_readlane:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_bfe_u32 v1, v0, 10, 10
+; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_readfirstlane_b32 s2, v1
+; CHECK-NEXT: v_readlane_b32 s2, v0, s2
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_endpgm
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+ %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+ %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
+ store i32 %v2, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
+; CHECK-LABEL: readlane_with_firstlane:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_readfirstlane_b32 s2, v0
+; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_endpgm
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 %tidx)
+ %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 3)
+ store i32 %v2, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
+; CHECK-LABEL: readlane_readlane:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_bfe_u32 v1, v0, 10, 10
+; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_readfirstlane_b32 s2, v1
+; CHECK-NEXT: v_readlane_b32 s2, v0, s2
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_endpgm
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+ %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+ %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 2)
+ store i32 %v2, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
+; CHECK-LABEL: permlane64_uniform:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: s_load_b32 s2, s[4:5], 0x8
+; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_endpgm
+ %v = call i32 @llvm.amdgcn.permlane64(i32 %src)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
+; CHECK-LABEL: permlane64_nonuniform:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_permlane64_b32 v1, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_endpgm
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %v = call i32 @llvm.amdgcn.permlane64(i32 %tid)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %out) {
+; CHECK-LABEL: permlane64_nonuniform_expression:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; CHECK-NEXT: v_add_nc_u32_e32 v1, 1, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; CHECK-NEXT: v_permlane64_b32 v1, v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_endpgm
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid2 = add i32 %tid, 1
+ %v = call i32 @llvm.amdgcn.permlane64(i32 %tid2)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_eq_zero(ptr addrspace(1) %out) {
+; CHECK-LABEL: trivial_waterfall_eq_zero:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 5
+; CHECK-NEXT: s_mov_b32 s2, 0
+; CHECK-NEXT: s_branch .LBB7_2
+; CHECK-NEXT: .LBB7_1: ; %Flow
+; CHECK-NEXT: ; in Loop: Header=BB7_2 Depth=1
+; CHECK-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
+; CHECK-NEXT: s_mov_b32 s2, -1
+; CHECK-NEXT: s_cbranch_vccz .LBB7_4
+; CHECK-NEXT: .LBB7_2: ; %while
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_and_b32 vcc_lo, exec_lo, s2
+; CHECK-NEXT: s_mov_b32 s2, -1
+; CHECK-NEXT: s_cbranch_vccnz .LBB7_1
+; CHECK-NEXT: ; %bb.3: ; %if
+; CHECK-NEXT: ; in Loop: Header=BB7_2 Depth=1
+; CHECK-NEXT: s_mov_b32 s2, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_branch .LBB7_1
+; CHECK-NEXT: .LBB7_4: ; %exit
+; CHECK-NEXT: s_endpgm
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %not_done = xor i1 %done, true
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+ %is_done = icmp eq i64 %ballot, 0 ; in this case is_done = !not_done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index 33ce278..c962c05 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-enable-uniform-intrinsic-combine=0 -O3 -S < %s | FileCheck %s -check-prefix=CURRENT-CHECK
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -O3 -S < %s | FileCheck %s -check-prefix=O3-CHECK
define protected amdgpu_kernel void @trivial_waterfall_eq_zero(ptr addrspace(1) %out) {
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
index a3e42e5..a7e828c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-enable-uniform-intrinsic-combine=0 -O3 -S < %s | FileCheck %s -check-prefix=CURRENT-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s -check-prefix=DCE-CHECK
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
index 2fde3e3..7929261 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,instcombine,early-cse,simplifycfg -S < %s | FileCheck %s -check-prefix=COMB-CHECK
; This should not be optimized
diff --git a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll
index db32135..b8f084d 100644
--- a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll
+++ b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll
@@ -4,24 +4,14 @@
define amdgpu_gs i32 @main() {
; CHECK-LABEL: main:
; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_bitcmp1_b32 0, 0
; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_cselect_b32 s1, -1, 0
-; CHECK-NEXT: s_or_saveexec_b32 s2, -1
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s1
-; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT: v_readfirstlane_b32 s1, v0
-; CHECK-NEXT: s_mov_b32 exec_lo, s2
-; CHECK-NEXT: s_or_b32 s0, s0, s1
-; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; CHECK-NEXT: s_bitcmp1_b32 s0, 0
; CHECK-NEXT: s_cselect_b32 s0, -1, 0
-; CHECK-NEXT: s_wait_alu 0xfffe
; CHECK-NEXT: s_xor_b32 s0, s0, -1
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: v_cndmask_b32_e64 v1, 0, 1, s0
-; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT: v_readfirstlane_b32 s0, v1
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: s_wait_alu 0xf1ff
; CHECK-NEXT: ; return to shader part epilog
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
index 3aa3663..704ea37 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
@@ -9,11 +9,11 @@
; RUN: | FileCheck -check-prefix=GCN-O3 %s
-; GCN-O0: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O0>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(atomic-expand,verify,gc-lowering,lower-constant-intrinsics,unreachableblockelim,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa,require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,localstackalloc))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,phi-node-elimination,two-address-instruction,regallocfast,si-fix-vgpr-copies,remove-redundant-debug-values,fixup-statepoint-caller-saved,prolog-epilog,post-ra-pseudos,si-post-ra-bundler,fentry-insert,xray-instrumentation,patchable-function,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
+; GCN-O0: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O0>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-uniform-intrinsic-combine),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(atomic-expand,verify,gc-lowering,lower-constant-intrinsics,unreachableblockelim,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa,require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,localstackalloc))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,phi-node-elimination,two-address-instruction,regallocfast,si-fix-vgpr-copies,remove-redundant-debug-values,fixup-statepoint-caller-saved,prolog-epilog,post-ra-pseudos,si-post-ra-bundler,fentry-insert,xray-instrumentation,patchable-function,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
-; GCN-O2: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O2>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi
-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
+; GCN-O2: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O2>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt,amdgpu-uniform-intrinsic-combine),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agp
r-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
-; GCN-O3: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O3>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination
,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
+; GCN-O3: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O3>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt,amdgpu-uniform-intrinsic-combine),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,dete
ct-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
define void @empty() {
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 6e52125..ee6caab 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -31,6 +31,11 @@
; GCN-O0-NEXT: AMDGPU Remove Incompatible Functions
; GCN-O0-NEXT: AMDGPU Printf lowering
; GCN-O0-NEXT: Lower ctors and dtors for AMDGPU
+; GCN-O0-NEXT: FunctionPass Manager
+; GCN-O0-NEXT: Dominator Tree Construction
+; GCN-O0-NEXT: Cycle Info Analysis
+; GCN-O0-NEXT: Uniformity Analysis
+; GCN-O0-NEXT: AMDGPU Uniform Intrinsic Combine
; GCN-O0-NEXT: Expand variadic functions
; GCN-O0-NEXT: AMDGPU Inline All Functions
; GCN-O0-NEXT: Inliner for always_inline functions
@@ -179,6 +184,11 @@
; GCN-O1-NEXT: AMDGPU Remove Incompatible Functions
; GCN-O1-NEXT: AMDGPU Printf lowering
; GCN-O1-NEXT: Lower ctors and dtors for AMDGPU
+; GCN-O1-NEXT: FunctionPass Manager
+; GCN-O1-NEXT: Dominator Tree Construction
+; GCN-O1-NEXT: Cycle Info Analysis
+; GCN-O1-NEXT: Uniformity Analysis
+; GCN-O1-NEXT: AMDGPU Uniform Intrinsic Combine
; GCN-O1-NEXT: Expand variadic functions
; GCN-O1-NEXT: AMDGPU Inline All Functions
; GCN-O1-NEXT: Inliner for always_inline functions
@@ -466,6 +476,11 @@
; GCN-O1-OPTS-NEXT: AMDGPU Remove Incompatible Functions
; GCN-O1-OPTS-NEXT: AMDGPU Printf lowering
; GCN-O1-OPTS-NEXT: Lower ctors and dtors for AMDGPU
+; GCN-O1-OPTS-NEXT: FunctionPass Manager
+; GCN-O1-OPTS-NEXT: Dominator Tree Construction
+; GCN-O1-OPTS-NEXT: Cycle Info Analysis
+; GCN-O1-OPTS-NEXT: Uniformity Analysis
+; GCN-O1-OPTS-NEXT: AMDGPU Uniform Intrinsic Combine
; GCN-O1-OPTS-NEXT: Expand variadic functions
; GCN-O1-OPTS-NEXT: AMDGPU Inline All Functions
; GCN-O1-OPTS-NEXT: Inliner for always_inline functions
@@ -783,6 +798,10 @@
; GCN-O2-NEXT: Lower ctors and dtors for AMDGPU
; GCN-O2-NEXT: FunctionPass Manager
; GCN-O2-NEXT: AMDGPU Image Intrinsic Optimizer
+; GCN-O2-NEXT: Dominator Tree Construction
+; GCN-O2-NEXT: Cycle Info Analysis
+; GCN-O2-NEXT: Uniformity Analysis
+; GCN-O2-NEXT: AMDGPU Uniform Intrinsic Combine
; GCN-O2-NEXT: Expand variadic functions
; GCN-O2-NEXT: AMDGPU Inline All Functions
; GCN-O2-NEXT: Inliner for always_inline functions
@@ -1104,6 +1123,10 @@
; GCN-O3-NEXT: Lower ctors and dtors for AMDGPU
; GCN-O3-NEXT: FunctionPass Manager
; GCN-O3-NEXT: AMDGPU Image Intrinsic Optimizer
+; GCN-O3-NEXT: Dominator Tree Construction
+; GCN-O3-NEXT: Cycle Info Analysis
+; GCN-O3-NEXT: Uniformity Analysis
+; GCN-O3-NEXT: AMDGPU Uniform Intrinsic Combine
; GCN-O3-NEXT: Expand variadic functions
; GCN-O3-NEXT: AMDGPU Inline All Functions
; GCN-O3-NEXT: Inliner for always_inline functions
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll
index e00e1f1..c1f3a12 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll
@@ -110,9 +110,8 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_and_b32 s0, s0, 1
-; CHECK-NEXT: v_cmp_ne_u32_e64 vcc_lo, s0, 0
-; CHECK-NEXT: s_cbranch_vccz .LBB8_2
+; CHECK-NEXT: s_bitcmp0_b32 s0, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB8_2
; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB8_3
@@ -156,15 +155,16 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_and_b32 s0, s0, 1
-; CHECK-NEXT: v_cmp_ne_u32_e64 vcc_lo, s0, 0
-; CHECK-NEXT: s_cbranch_vccz .LBB10_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB10_3
-; CHECK-NEXT: .LBB10_2: ; %true
+; CHECK-NEXT: s_bitcmp1_b32 s0, 0
+; CHECK-NEXT: s_cselect_b32 s0, -1, 0
+; CHECK-NEXT: s_and_b32 vcc_lo, exec_lo, s0
+; CHECK-NEXT: s_cbranch_vccnz .LBB10_2
+; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB10_3
+; CHECK-NEXT: .LBB10_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB10_3
; CHECK-NEXT: .LBB10_3:
%c = trunc i32 %v to i1
%ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
@@ -201,8 +201,8 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: v_cmp_lt_u32_e64 vcc_lo, s0, 12
-; CHECK-NEXT: s_cbranch_vccz .LBB12_2
+; CHECK-NEXT: s_cmp_gt_u32 s0, 11
+; CHECK-NEXT: s_cbranch_scc1 .LBB12_2
; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB12_3
@@ -245,14 +245,14 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: v_cmp_lt_u32_e64 vcc_lo, s0, 12
-; CHECK-NEXT: s_cbranch_vccz .LBB14_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB14_3
-; CHECK-NEXT: .LBB14_2: ; %true
+; CHECK-NEXT: s_cmp_lt_u32 s0, 12
+; CHECK-NEXT: s_cbranch_scc1 .LBB14_2
+; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB14_3
+; CHECK-NEXT: .LBB14_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB14_3
; CHECK-NEXT: .LBB14_3:
%c = icmp ult i32 %v, 12
%ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
@@ -293,13 +293,13 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_and:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_cmp_lt_u32 s0, 12
+; CHECK-NEXT: s_cmp_gt_u32 s0, 11
; CHECK-NEXT: s_cselect_b32 s0, -1, 0
-; CHECK-NEXT: s_cmp_gt_u32 s1, 34
+; CHECK-NEXT: s_cmp_lt_u32 s1, 35
; CHECK-NEXT: s_cselect_b32 s1, -1, 0
-; CHECK-NEXT: s_and_b32 s0, s0, s1
-; CHECK-NEXT: s_and_b32 s0, s0, exec_lo
-; CHECK-NEXT: s_cbranch_scc0 .LBB16_2
+; CHECK-NEXT: s_or_b32 s0, s0, s1
+; CHECK-NEXT: s_and_b32 vcc_lo, exec_lo, s0
+; CHECK-NEXT: s_cbranch_vccnz .LBB16_2
; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB16_3
@@ -353,14 +353,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg
; CHECK-NEXT: s_cmp_gt_u32 s1, 34
; CHECK-NEXT: s_cselect_b32 s1, -1, 0
; CHECK-NEXT: s_and_b32 s0, s0, s1
-; CHECK-NEXT: s_and_b32 s0, s0, exec_lo
-; CHECK-NEXT: s_cbranch_scc0 .LBB18_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB18_3
-; CHECK-NEXT: .LBB18_2: ; %true
+; CHECK-NEXT: s_and_b32 vcc_lo, exec_lo, s0
+; CHECK-NEXT: s_cbranch_vccnz .LBB18_2
+; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB18_3
+; CHECK-NEXT: .LBB18_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB18_3
; CHECK-NEXT: .LBB18_3:
%v1c = icmp ult i32 %v1, 12
%v2c = icmp ugt i32 %v2, 34
@@ -591,3 +591,24 @@ exit:
store i32 %ballot, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_cs i32 @compare_bfloats(bfloat %x, bfloat %y) {
+; GFX10-LABEL: compare_bfloats:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX10-NEXT: v_cmp_gt_f32_e64 s0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: compare_bfloats:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_mov_b16_e32 v2.l, 0
+; GFX11-NEXT: v_mov_b16_e32 v2.h, v1.l
+; GFX11-NEXT: v_mov_b16_e32 v1.h, v0.l
+; GFX11-NEXT: v_mov_b16_e32 v1.l, v2.l
+; GFX11-NEXT: v_cmp_gt_f32_e64 s0, v1, v2
+; GFX11-NEXT: ; return to shader part epilog
+ %cmp = fcmp ogt bfloat %x, %y
+ %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %cmp)
+ ret i32 %ballot
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll
index b4adf7f..827a01f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll
@@ -113,9 +113,8 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_and_b32 s0, s0, 1
-; CHECK-NEXT: v_cmp_ne_u32_e64 vcc, s0, 0
-; CHECK-NEXT: s_cbranch_vccz .LBB8_2
+; CHECK-NEXT: s_bitcmp0_b32 s0, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB8_2
; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB8_3
@@ -159,15 +158,16 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_and_b32 s0, s0, 1
-; CHECK-NEXT: v_cmp_ne_u32_e64 vcc, s0, 0
-; CHECK-NEXT: s_cbranch_vccz .LBB10_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB10_3
-; CHECK-NEXT: .LBB10_2: ; %true
+; CHECK-NEXT: s_bitcmp1_b32 s0, 0
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: s_and_b64 vcc, exec, s[0:1]
+; CHECK-NEXT: s_cbranch_vccnz .LBB10_2
+; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB10_3
+; CHECK-NEXT: .LBB10_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB10_3
; CHECK-NEXT: .LBB10_3:
%c = trunc i32 %v to i1
%ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
@@ -204,8 +204,8 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: v_cmp_lt_u32_e64 vcc, s0, 12
-; CHECK-NEXT: s_cbranch_vccz .LBB12_2
+; CHECK-NEXT: s_cmp_gt_u32 s0, 11
+; CHECK-NEXT: s_cbranch_scc1 .LBB12_2
; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB12_3
@@ -248,14 +248,14 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) {
; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare:
; CHECK: ; %bb.0:
-; CHECK-NEXT: v_cmp_lt_u32_e64 vcc, s0, 12
-; CHECK-NEXT: s_cbranch_vccz .LBB14_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB14_3
-; CHECK-NEXT: .LBB14_2: ; %true
+; CHECK-NEXT: s_cmp_lt_u32 s0, 12
+; CHECK-NEXT: s_cbranch_scc1 .LBB14_2
+; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB14_3
+; CHECK-NEXT: .LBB14_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB14_3
; CHECK-NEXT: .LBB14_3:
%c = icmp ult i32 %v, 12
%ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
@@ -296,13 +296,13 @@ false:
define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) {
; CHECK-LABEL: branch_uniform_ballot_ne_zero_and:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_cmp_lt_u32 s0, 12
+; CHECK-NEXT: s_cmp_gt_u32 s0, 11
; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
-; CHECK-NEXT: s_cmp_gt_u32 s1, 34
+; CHECK-NEXT: s_cmp_lt_u32 s1, 35
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
-; CHECK-NEXT: s_and_b64 s[0:1], s[2:3], s[0:1]
-; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], exec
-; CHECK-NEXT: s_cbranch_scc0 .LBB16_2
+; CHECK-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
+; CHECK-NEXT: s_and_b64 vcc, exec, s[0:1]
+; CHECK-NEXT: s_cbranch_vccnz .LBB16_2
; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB16_3
@@ -356,14 +356,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg
; CHECK-NEXT: s_cmp_gt_u32 s1, 34
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: s_and_b64 s[0:1], s[2:3], s[0:1]
-; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], exec
-; CHECK-NEXT: s_cbranch_scc0 .LBB18_2
-; CHECK-NEXT: ; %bb.1: ; %false
-; CHECK-NEXT: s_mov_b32 s0, 33
-; CHECK-NEXT: s_branch .LBB18_3
-; CHECK-NEXT: .LBB18_2: ; %true
+; CHECK-NEXT: s_and_b64 vcc, exec, s[0:1]
+; CHECK-NEXT: s_cbranch_vccnz .LBB18_2
+; CHECK-NEXT: ; %bb.1: ; %true
; CHECK-NEXT: s_mov_b32 s0, 42
; CHECK-NEXT: s_branch .LBB18_3
+; CHECK-NEXT: .LBB18_2: ; %false
+; CHECK-NEXT: s_mov_b32 s0, 33
+; CHECK-NEXT: s_branch .LBB18_3
; CHECK-NEXT: .LBB18_3:
%v1c = icmp ult i32 %v1, 12
%v2c = icmp ugt i32 %v2, 34
@@ -557,3 +557,15 @@ exit:
store i64 %ballot, ptr addrspace(1) %out
ret void
}
+
+define amdgpu_cs i64 @compare_bfloats(bfloat %x, bfloat %y) {
+; CHECK-LABEL: compare_bfloats:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; CHECK-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; CHECK-NEXT: v_cmp_gt_f32_e64 s[0:1], v0, v1
+; CHECK-NEXT: ; return to shader part epilog
+ %cmp = fcmp ogt bfloat %x, %y
+ %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cmp)
+ ret i64 %ballot
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll
index 6dd2258..39191d2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll
@@ -23,10 +23,8 @@ define amdgpu_kernel void @test_s_i32(ptr addrspace(1) %out, i32 %src0) {
; GFX11-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
-; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX11-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-SDAG-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: test_s_i32:
@@ -36,8 +34,6 @@ define amdgpu_kernel void @test_s_i32(ptr addrspace(1) %out, i32 %src0) {
; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v0
; GFX11-GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-GISEL-NEXT: s_endpgm
%v = call i32 @llvm.amdgcn.permlane64.i32(i32 %src0)
@@ -50,12 +46,9 @@ define amdgpu_kernel void @test_s_i64(ptr addrspace(1) %out, i64 %src0) {
; GFX11-SDAG: ; %bb.0:
; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s3
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v2
-; GFX11-SDAG-NEXT: global_store_b64 v3, v[0:1], s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-SDAG-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: test_s_i64:
@@ -64,9 +57,6 @@ define amdgpu_kernel void @test_s_i64(ptr addrspace(1) %out, i64 %src0) {
; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v0
-; GFX11-GISEL-NEXT: v_permlane64_b32 v1, v1
; GFX11-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-GISEL-NEXT: s_endpgm
%v = call i64 @llvm.amdgcn.permlane64.i64(i64 %src0)
@@ -79,12 +69,9 @@ define amdgpu_kernel void @test_s_f64(ptr addrspace(1) %out, double %src0) {
; GFX11-SDAG: ; %bb.0:
; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s3
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v2
-; GFX11-SDAG-NEXT: global_store_b64 v3, v[0:1], s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-SDAG-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: test_s_f64:
@@ -93,9 +80,6 @@ define amdgpu_kernel void @test_s_f64(ptr addrspace(1) %out, double %src0) {
; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v0
-; GFX11-GISEL-NEXT: v_permlane64_b32 v1, v1
; GFX11-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-GISEL-NEXT: s_endpgm
%v = call double @llvm.amdgcn.permlane64.f64(double %src0)
@@ -116,19 +100,15 @@ define amdgpu_kernel void @test_i_i32(ptr addrspace(1) %out) {
; GFX11-SDAG-LABEL: test_i_i32:
; GFX11-SDAG: ; %bb.0:
; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0x63 :: v_dual_mov_b32 v1, 0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x63
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-SDAG-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: test_i_i32:
; GFX11-GISEL: ; %bb.0:
; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-GISEL-NEXT: v_dual_mov_b32 v0, 0x63 :: v_dual_mov_b32 v1, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v0
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-GISEL-NEXT: s_endpgm
@@ -141,19 +121,15 @@ define amdgpu_kernel void @test_i_f32(ptr addrspace(1) %out) {
; GFX11-SDAG-LABEL: test_i_f32:
; GFX11-SDAG: ; %bb.0:
; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0x449a5000 :: v_dual_mov_b32 v1, 0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x449a5000
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-SDAG-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: test_i_f32:
; GFX11-GISEL: ; %bb.0:
; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-GISEL-NEXT: v_dual_mov_b32 v0, 0x449a5000 :: v_dual_mov_b32 v1, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v0
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-GISEL-NEXT: s_endpgm
@@ -166,23 +142,16 @@ define amdgpu_kernel void @test_i_i64(ptr addrspace(1) %out) {
; GFX11-SDAG-LABEL: test_i_i64:
; GFX11-SDAG: ; %bb.0:
; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, 0x63
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v2
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, 0x63
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-SDAG-NEXT: global_store_b64 v1, v[0:1], s[0:1]
; GFX11-SDAG-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: test_i_i64:
; GFX11-GISEL: ; %bb.0:
; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, 0x63
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v0
-; GFX11-GISEL-NEXT: v_permlane64_b32 v1, v2
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-GISEL-NEXT: s_endpgm
@@ -195,22 +164,16 @@ define amdgpu_kernel void @test_i_f64(ptr addrspace(1) %out) {
; GFX11-SDAG-LABEL: test_i_f64:
; GFX11-SDAG: ; %bb.0:
; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, 0x40934a00
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, 0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v2
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x40934a00
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-SDAG-NEXT: global_store_b64 v0, v[0:1], s[0:1]
; GFX11-SDAG-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: test_i_f64:
; GFX11-GISEL: ; %bb.0:
; GFX11-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX11-GISEL-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, 0x40934a00
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_permlane64_b32 v0, v2
-; GFX11-GISEL-NEXT: v_permlane64_b32 v1, v1
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-GISEL-NEXT: v_dual_mov_b32 v1, 0x40934a00 :: v_dual_mov_b32 v2, 0
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-GISEL-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll
index b0149f7..672b658 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll
@@ -6,12 +6,9 @@ define amdgpu_kernel void @test_p0(ptr addrspace(1) %out, ptr %src0) {
; GFX11-SDAG: ; %bb.0:
; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s3
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v2
-; GFX11-SDAG-NEXT: global_store_b64 v3, v[0:1], s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-SDAG-NEXT: s_endpgm
%v = call ptr @llvm.amdgcn.permlane64.p0(ptr %src0)
store ptr %v, ptr addrspace(1) %out
@@ -22,21 +19,14 @@ define amdgpu_kernel void @test_v3p0(ptr addrspace(1) %out, <3 x ptr> %src0) {
; GFX11-SDAG-LABEL: test_v3p0:
; GFX11-SDAG: ; %bb.0:
; GFX11-SDAG-NEXT: s_clause 0x2
-; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x44
; GFX11-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x54
+; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x44
; GFX11-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v1, s2
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v4, s1 :: v_dual_mov_b32 v5, s7
-; GFX11-SDAG-NEXT: v_mov_b32_e32 v8, s6
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, s3 :: v_dual_mov_b32 v7, s0
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v2, v1
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v4
-; GFX11-SDAG-NEXT: v_permlane64_b32 v5, v5
-; GFX11-SDAG-NEXT: v_permlane64_b32 v4, v8
-; GFX11-SDAG-NEXT: v_permlane64_b32 v3, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v7
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v5, s7
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v1, s1
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v2, s2
; GFX11-SDAG-NEXT: s_clause 0x1
; GFX11-SDAG-NEXT: global_store_b64 v6, v[4:5], s[4:5] offset:16
; GFX11-SDAG-NEXT: global_store_b128 v6, v[0:3], s[4:5]
@@ -53,10 +43,8 @@ define amdgpu_kernel void @test_p3(ptr addrspace(1) %out, ptr addrspace(3) %src0
; GFX11-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
-; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX11-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-SDAG-NEXT: s_endpgm
%v = call ptr addrspace(3) @llvm.amdgcn.permlane64.v3p0(ptr addrspace(3) %src0)
store ptr addrspace(3) %v, ptr addrspace(1) %out
@@ -70,14 +58,9 @@ define amdgpu_kernel void @test_v3p3(ptr addrspace(1) %out, <3 x ptr addrspace(3
; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
; GFX11-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v3, s0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v2, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v3
-; GFX11-SDAG-NEXT: global_store_b96 v4, v[0:2], s[4:5]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-SDAG-NEXT: global_store_b96 v3, v[0:2], s[4:5]
; GFX11-SDAG-NEXT: s_endpgm
%v = call <3 x ptr addrspace(3)> @llvm.amdgcn.permlane64.v3p3(<3 x ptr addrspace(3)> %src0)
store <3 x ptr addrspace(3)> %v, ptr addrspace(1) %out
@@ -91,10 +74,8 @@ define amdgpu_kernel void @test_p5(ptr addrspace(1) %out, ptr addrspace(5) %src0
; GFX11-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
-; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX11-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-SDAG-NEXT: s_endpgm
%v = call ptr addrspace(5) @llvm.amdgcn.permlane64.p5(ptr addrspace(5) %src0)
store ptr addrspace(5) %v, ptr addrspace(1) %out
@@ -108,14 +89,9 @@ define amdgpu_kernel void @test_v3p5(ptr addrspace(1) %out, <3 x ptr addrspace(5
; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
; GFX11-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v3, s0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v2, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v3
-; GFX11-SDAG-NEXT: global_store_b96 v4, v[0:2], s[4:5]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-SDAG-NEXT: global_store_b96 v3, v[0:2], s[4:5]
; GFX11-SDAG-NEXT: s_endpgm
%v = call <3 x ptr addrspace(5)> @llvm.amdgcn.permlane64.v3p5(<3 x ptr addrspace(5)> %src0)
store <3 x ptr addrspace(5)> %v, ptr addrspace(1) %out
@@ -129,10 +105,8 @@ define amdgpu_kernel void @test_p6(ptr addrspace(1) %out, ptr addrspace(6) %src0
; GFX11-SDAG-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v0
-; GFX11-SDAG-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; GFX11-SDAG-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-SDAG-NEXT: s_endpgm
%v = call ptr addrspace(6) @llvm.amdgcn.permlane64.p6(ptr addrspace(6) %src0)
store ptr addrspace(6) %v, ptr addrspace(1) %out
@@ -146,14 +120,9 @@ define amdgpu_kernel void @test_v3p6(ptr addrspace(1) %out, <3 x ptr addrspace(6
; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
; GFX11-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v3, s0
-; GFX11-SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v2, v0
-; GFX11-SDAG-NEXT: v_permlane64_b32 v1, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_permlane64_b32 v0, v3
-; GFX11-SDAG-NEXT: global_store_b96 v4, v[0:2], s[4:5]
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s0
+; GFX11-SDAG-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-SDAG-NEXT: global_store_b96 v3, v[0:2], s[4:5]
; GFX11-SDAG-NEXT: s_endpgm
%v = call <3 x ptr addrspace(6)> @llvm.amdgcn.permlane64.v3p6(<3 x ptr addrspace(6)> %src0)
store <3 x ptr addrspace(6)> %v, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
index d1ba892..02d2990 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
@@ -396,8 +396,7 @@ define amdgpu_kernel void @test_readfirstlane_imm_f64(ptr addrspace(1) %out) {
;
; CHECK-GISEL-LABEL: test_readfirstlane_imm_f64:
; CHECK-GISEL: ; %bb.0:
-; CHECK-GISEL-NEXT: s_mov_b32 s0, 0
-; CHECK-GISEL-NEXT: s_mov_b32 s1, 0x40400000
+; CHECK-GISEL-NEXT: s_mov_b64 s[0:1], 0x4040000000000000
; CHECK-GISEL-NEXT: ;;#ASMSTART
; CHECK-GISEL-NEXT: ; use s[0:1]
; CHECK-GISEL-NEXT: ;;#ASMEND
@@ -456,14 +455,13 @@ define amdgpu_kernel void @test_readfirstlane_imm_fold_i64(ptr addrspace(1) %out
; CHECK-GISEL-LABEL: test_readfirstlane_imm_fold_i64:
; CHECK-GISEL: ; %bb.0:
; CHECK-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; CHECK-GISEL-NEXT: s_mov_b64 s[2:3], 32
; CHECK-GISEL-NEXT: s_add_i32 s12, s12, s17
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, 32
; CHECK-GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
+; CHECK-GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-GISEL-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, 0
; CHECK-GISEL-NEXT: v_mov_b32_e32 v2, s0
; CHECK-GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CHECK-GISEL-NEXT: s_endpgm
@@ -490,15 +488,13 @@ define amdgpu_kernel void @test_readfirstlane_imm_fold_f64(ptr addrspace(1) %out
; CHECK-GISEL-LABEL: test_readfirstlane_imm_fold_f64:
; CHECK-GISEL: ; %bb.0:
; CHECK-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; CHECK-GISEL-NEXT: s_mov_b32 s2, 0
; CHECK-GISEL-NEXT: s_add_i32 s12, s12, s17
-; CHECK-GISEL-NEXT: s_mov_b32 s3, 0x40400000
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, s2
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, 0
; CHECK-GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
; CHECK-GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, 0x40400000
; CHECK-GISEL-NEXT: v_mov_b32_e32 v2, s0
; CHECK-GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CHECK-GISEL-NEXT: s_endpgm
@@ -588,17 +584,17 @@ define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i64(ptr addrspace(1
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; CHECK-SDAG-NEXT: s_add_i32 s12, s12, s17
-; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CHECK-SDAG-NEXT: ;;#ASMSTART
; CHECK-SDAG-NEXT: s_mov_b64 s[2:3], 0
; CHECK-SDAG-NEXT: ;;#ASMEND
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
; CHECK-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s3
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-SDAG-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-SDAG-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; CHECK-SDAG-NEXT: s_endpgm
;
; CHECK-GISEL-LABEL: test_readfirstlane_copy_from_sgpr_i64:
@@ -628,17 +624,17 @@ define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_f64(ptr addrspace(1
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; CHECK-SDAG-NEXT: s_add_i32 s12, s12, s17
-; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CHECK-SDAG-NEXT: ;;#ASMSTART
; CHECK-SDAG-NEXT: s_mov_b64 s[2:3], 0
; CHECK-SDAG-NEXT: ;;#ASMEND
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
; CHECK-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s3
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-SDAG-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-SDAG-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; CHECK-SDAG-NEXT: s_endpgm
;
; CHECK-GISEL-LABEL: test_readfirstlane_copy_from_sgpr_f64:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
index 7ff5eb4..0795f40 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
@@ -9,7 +9,7 @@ declare double @llvm.amdgcn.readlane.f64(double, i32) #0
define amdgpu_kernel void @test_readlane_sreg_sreg_i32(i32 %src0, i32 %src1) #1 {
; CHECK-SDAG-LABEL: test_readlane_sreg_sreg_i32:
; CHECK-SDAG: ; %bb.0:
-; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; CHECK-SDAG-NEXT: s_load_dword s0, s[8:9], 0x0
; CHECK-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-SDAG-NEXT: ;;#ASMSTART
; CHECK-SDAG-NEXT: ; use s0
@@ -18,7 +18,7 @@ define amdgpu_kernel void @test_readlane_sreg_sreg_i32(i32 %src0, i32 %src1) #1
;
; CHECK-GISEL-LABEL: test_readlane_sreg_sreg_i32:
; CHECK-GISEL: ; %bb.0:
-; CHECK-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; CHECK-GISEL-NEXT: s_load_dword s0, s[8:9], 0x0
; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-GISEL-NEXT: ;;#ASMSTART
; CHECK-GISEL-NEXT: ; use s0
@@ -224,14 +224,13 @@ define amdgpu_kernel void @test_readlane_imm_sreg_i64(ptr addrspace(1) %out, i32
; CHECK-GISEL-LABEL: test_readlane_imm_sreg_i64:
; CHECK-GISEL: ; %bb.0:
; CHECK-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; CHECK-GISEL-NEXT: s_mov_b64 s[2:3], 32
; CHECK-GISEL-NEXT: s_add_i32 s12, s12, s17
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, 32
; CHECK-GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
+; CHECK-GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-GISEL-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, 0
; CHECK-GISEL-NEXT: v_mov_b32_e32 v2, s0
; CHECK-GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CHECK-GISEL-NEXT: s_endpgm
@@ -258,15 +257,13 @@ define amdgpu_kernel void @test_readlane_imm_sreg_f64(ptr addrspace(1) %out, i32
; CHECK-GISEL-LABEL: test_readlane_imm_sreg_f64:
; CHECK-GISEL: ; %bb.0:
; CHECK-GISEL-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
-; CHECK-GISEL-NEXT: s_mov_b32 s2, 0
; CHECK-GISEL-NEXT: s_add_i32 s12, s12, s17
-; CHECK-GISEL-NEXT: s_mov_b32 s3, 0x40400000
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, s2
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v0, 0
; CHECK-GISEL-NEXT: s_mov_b32 flat_scratch_lo, s13
; CHECK-GISEL-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, 0x40400000
; CHECK-GISEL-NEXT: v_mov_b32_e32 v2, s0
; CHECK-GISEL-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CHECK-GISEL-NEXT: s_endpgm
@@ -660,17 +657,17 @@ define amdgpu_kernel void @test_readlane_copy_from_sgpr_i64(ptr addrspace(1) %ou
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; CHECK-SDAG-NEXT: s_add_i32 s12, s12, s17
-; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CHECK-SDAG-NEXT: ;;#ASMSTART
; CHECK-SDAG-NEXT: s_mov_b64 s[2:3], 0
; CHECK-SDAG-NEXT: ;;#ASMEND
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
; CHECK-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s3
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-SDAG-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-SDAG-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; CHECK-SDAG-NEXT: s_endpgm
;
; CHECK-GISEL-LABEL: test_readlane_copy_from_sgpr_i64:
@@ -700,17 +697,17 @@ define amdgpu_kernel void @test_readlane_copy_from_sgpr_f64(ptr addrspace(1) %ou
; CHECK-SDAG: ; %bb.0:
; CHECK-SDAG-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
; CHECK-SDAG-NEXT: s_add_i32 s12, s12, s17
-; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CHECK-SDAG-NEXT: ;;#ASMSTART
; CHECK-SDAG-NEXT: s_mov_b64 s[2:3], 0
; CHECK-SDAG-NEXT: ;;#ASMEND
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s2
+; CHECK-SDAG-NEXT: s_mov_b32 flat_scratch_lo, s13
; CHECK-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s3
-; CHECK-SDAG-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-SDAG-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-SDAG-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v1, s1
+; CHECK-SDAG-NEXT: v_mov_b32_e32 v3, s3
+; CHECK-SDAG-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; CHECK-SDAG-NEXT: s_endpgm
;
; CHECK-GISEL-LABEL: test_readlane_copy_from_sgpr_f64:
diff --git a/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll b/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll
index c573253..48ed5c4 100644
--- a/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll
+++ b/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll
@@ -73,10 +73,10 @@ define amdgpu_kernel void @constant_zextload_v64i16_to_v64i32(ptr addrspace(1) %
}
; CHECK-LABEL: {{^}}excess_soft_clause_reg_pressure:
-; GFX908: NumSgprs: 64
-; GFX908-GCNTRACKERS: NumSgprs: 64
+; GFX908: NumSgprs: 56
+; GFX908-GCNTRACKERS: NumSgprs: 56
; GFX908: NumVgprs: 43
-; GFX908-GCNTRACKERS: NumVgprs: 39
+; GFX908-GCNTRACKERS: NumVgprs: 40
; GFX908: Occupancy: 5
; GFX908-GCNTRACKERS: Occupancy: 6
diff --git a/llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll b/llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll
index 586579f..ef96944 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll
@@ -20,38 +20,33 @@ define void @test() {
; CHECK-NEXT: ; in Loop: Header=BB0_1 Depth=1
; CHECK-NEXT: .LBB0_3: ; %bb.3
; CHECK-NEXT: ; in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT: ; implicit-def: $sgpr4
-; CHECK-NEXT: v_mov_b32_e32 v0, s4
-; CHECK-NEXT: v_readfirstlane_b32 s6, v0
; CHECK-NEXT: s_mov_b64 s[4:5], -1
-; CHECK-NEXT: s_mov_b32 s7, 0
-; CHECK-NEXT: s_cmp_eq_u32 s6, s7
; CHECK-NEXT: ; implicit-def: $vgpr1 : SGPR spill to VGPR lane
; CHECK-NEXT: v_writelane_b32 v1, s4, 0
; CHECK-NEXT: v_writelane_b32 v1, s5, 1
-; CHECK-NEXT: s_mov_b64 s[10:11], exec
-; CHECK-NEXT: s_mov_b64 exec, -1
+; CHECK-NEXT: s_or_saveexec_b64 s[8:9], -1
+; CHECK-NEXT: s_nop 0
; CHECK-NEXT: v_accvgpr_write_b32 a0, v1 ; Reload Reuse
-; CHECK-NEXT: s_mov_b64 exec, s[10:11]
+; CHECK-NEXT: s_mov_b64 exec, s[8:9]
; CHECK-NEXT: s_cbranch_scc1 .LBB0_5
; CHECK-NEXT: ; %bb.4: ; %bb.4
; CHECK-NEXT: ; in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT: s_or_saveexec_b64 s[10:11], -1
+; CHECK-NEXT: s_or_saveexec_b64 s[8:9], -1
; CHECK-NEXT: v_accvgpr_read_b32 v1, a0 ; Reload Reuse
-; CHECK-NEXT: s_mov_b64 exec, s[10:11]
+; CHECK-NEXT: s_mov_b64 exec, s[8:9]
; CHECK-NEXT: s_mov_b64 s[4:5], 0
; CHECK-NEXT: v_writelane_b32 v1, s4, 0
; CHECK-NEXT: v_writelane_b32 v1, s5, 1
-; CHECK-NEXT: s_or_saveexec_b64 s[10:11], -1
+; CHECK-NEXT: s_or_saveexec_b64 s[8:9], -1
; CHECK-NEXT: s_nop 0
; CHECK-NEXT: v_accvgpr_write_b32 a0, v1 ; Reload Reuse
-; CHECK-NEXT: s_mov_b64 exec, s[10:11]
+; CHECK-NEXT: s_mov_b64 exec, s[8:9]
; CHECK-NEXT: .LBB0_5: ; %Flow
; CHECK-NEXT: ; in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT: s_or_saveexec_b64 s[10:11], -1
+; CHECK-NEXT: s_or_saveexec_b64 s[8:9], -1
; CHECK-NEXT: s_nop 0
; CHECK-NEXT: v_accvgpr_read_b32 v1, a0 ; Reload Reuse
-; CHECK-NEXT: s_mov_b64 exec, s[10:11]
+; CHECK-NEXT: s_mov_b64 exec, s[8:9]
; CHECK-NEXT: v_readlane_b32 s4, v1, 0
; CHECK-NEXT: v_readlane_b32 s5, v1, 1
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll b/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
index 5aafb0f..364598f 100644
--- a/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
+++ b/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
@@ -31,8 +31,8 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
; CHECK-NEXT: [[COPY13:%[0-9]+]]:sgpr_32 = COPY $sgpr10
; CHECK-NEXT: [[COPY14:%[0-9]+]]:sgpr_32 = COPY $sgpr8
; CHECK-NEXT: undef [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub0_sub1:sgpr_128 = S_LOAD_DWORDX2_IMM [[COPY]], 232, 0 :: (invariant load (s64) from %ir.39, addrspace 4)
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %125:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: KILL undef %125:sgpr_128
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %117:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: KILL undef %117:sgpr_128
; CHECK-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY5]], 4, implicit-def dead $scc
; CHECK-NEXT: [[S_LSHL_B32_1:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY4]], 4, implicit-def dead $scc
; CHECK-NEXT: [[S_LSHL_B32_2:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY3]], 4, implicit-def dead $scc
@@ -44,87 +44,85 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
; CHECK-NEXT: [[S_SUB_I32_1:%[0-9]+]]:sreg_32 = S_SUB_I32 [[S_BUFFER_LOAD_DWORD_IMM]], 30, implicit-def dead $scc
; CHECK-NEXT: undef [[S_ADD_U32_:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_2]], implicit-def $scc
; CHECK-NEXT: [[S_ADD_U32_:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_]], 16, 0 :: (invariant load (s128) from %ir.81, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_]], 16, 0 :: (invariant load (s128) from %ir.71, addrspace 4)
; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM1:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM undef %74:sreg_64, 0, 0 :: (invariant load (s128) from `ptr addrspace(4) poison`, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM2:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_]], 64, 0 :: (invariant load (s128) from %ir.88, addrspace 4)
; CHECK-NEXT: KILL undef %74:sreg_64
; CHECK-NEXT: KILL [[S_ADD_U32_]].sub0, [[S_ADD_U32_]].sub1
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[S_LOAD_DWORDX4_IMM]], 0, 0 :: (dereferenceable invariant load (s32))
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: undef [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_128 = S_MOV_B32 0
- ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET undef %118:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], undef %89:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET undef %112:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], undef %87:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM1]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: KILL undef %89:sgpr_128
- ; CHECK-NEXT: KILL undef %118:sgpr_128
+ ; CHECK-NEXT: KILL undef %112:sgpr_128
+ ; CHECK-NEXT: KILL undef %87:sgpr_128
; CHECK-NEXT: [[S_SUB_I32_2:%[0-9]+]]:sreg_32 = S_SUB_I32 [[S_BUFFER_LOAD_DWORD_IMM1]], 31, implicit-def dead $scc
; CHECK-NEXT: undef [[S_ADD_U32_1:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_]], implicit-def $scc
; CHECK-NEXT: [[S_ADD_U32_1:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
; CHECK-NEXT: undef [[S_ADD_U32_2:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_1]], implicit-def $scc
; CHECK-NEXT: [[S_ADD_U32_2:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_3:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM2:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_1]], 64, 0 :: (invariant load (s128) from %ir.87, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM3:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_2]], 64, 0 :: (invariant load (s128) from %ir.93, addrspace 4)
- ; CHECK-NEXT: KILL [[S_ADD_U32_1]].sub0, [[S_ADD_U32_1]].sub1
+ ; CHECK-NEXT: [[S_ASHR_I32_3:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 undef %148:sreg_32, 31, implicit-def dead $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_3:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], undef %148:sreg_32, implicit-def $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM3:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_1]], 64, 0 :: (invariant load (s128) from %ir.77, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM4:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_2]], 64, 0 :: (invariant load (s128) from %ir.83, addrspace 4)
; CHECK-NEXT: KILL [[S_ADD_U32_2]].sub0, [[S_ADD_U32_2]].sub1
- ; CHECK-NEXT: [[S_ADD_U32_3:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_ASHR_I32_3:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 undef %169:sreg_32, 31, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_4:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], undef %169:sreg_32, implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_4:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_5:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_5:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_6:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_1]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_6:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_7:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, undef %169:sreg_32, implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_7:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_8:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_8:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_9:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY8]], [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_9:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %48:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_10:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_1]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_10:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_11:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_11:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: KILL [[S_ADD_U32_1]].sub0, [[S_ADD_U32_1]].sub1
+ ; CHECK-NEXT: [[S_ADD_U32_3:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_4:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_4:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_5:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_1]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_5:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_6:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, undef %148:sreg_32, implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_6:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_7:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_2]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_7:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_8:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY8]], [[S_LSHL_B32_]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_8:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %48:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_9:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_1]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_9:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_10:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_2]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_10:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_]], 16, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_1:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_2]], 16, implicit-def dead $scc
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], undef %302:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], undef %279:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32))
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[S_MOV_B32_]], 16, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %357:sgpr_128, undef %358:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %368:sgpr_128, 16, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM4:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_3]], 64, 0 :: (invariant load (s128) from %ir.99, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM5:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 64, 0 :: (invariant load (s128) from %ir.107, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM6:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 0, 0 :: (invariant load (s128) from %ir.112, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM7:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 0, 0 :: (invariant load (s128) from %ir.117, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM8:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 0, 0 :: (invariant load (s128) from %ir.124, addrspace 4)
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM2]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %352:sgpr_128, [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM5:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %363:sgpr_128, [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM3]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %334:sgpr_128, undef %335:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %345:sgpr_128, 16, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM5:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_3]], 64, 0 :: (invariant load (s128) from %ir.95, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM6:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 0, 0 :: (invariant load (s128) from %ir.100, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM7:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 0, 0 :: (invariant load (s128) from %ir.105, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM8:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 0, 0 :: (invariant load (s128) from %ir.112, addrspace 4)
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %329:sgpr_128, [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM5:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %340:sgpr_128, [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM3]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM4]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ADD_I32_2:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM]], -98, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_3:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM1]], -114, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_4:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM2]], -130, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_5:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM2]], -178, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_12:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY10]], [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_12:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %42:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_13:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_13:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_14:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_1]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_14:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_15:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_15:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_11:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY10]], [[S_LSHL_B32_]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_11:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %42:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_12:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_12:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_13:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_1]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_13:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_14:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_2]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_14:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
; CHECK-NEXT: [[S_LSHL_B32_3:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY12]], 4, implicit-def dead $scc
- ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN4:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM4]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN4:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM2]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ADD_I32_6:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_3]], 16, implicit-def dead $scc
- ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %384:sgpr_128, [[S_ADD_I32_6]], 0, 0 :: (dereferenceable invariant load (s32))
+ ; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_SGPR_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %361:sgpr_128, [[S_ADD_I32_6]], 0, 0 :: (dereferenceable invariant load (s32))
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN5:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM5]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM9:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 224, 0 :: (invariant load (s128) from %ir.129, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM10:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[COPY7]], 224, 0 :: (invariant load (s128) from %ir.145, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM11:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 576, 0 :: (invariant load (s128) from %ir.150, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM9:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 224, 0 :: (invariant load (s128) from %ir.117, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM10:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[COPY7]], 224, 0 :: (invariant load (s128) from %ir.133, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM11:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 576, 0 :: (invariant load (s128) from %ir.138, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN6:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM6]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM12:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 224, 0 :: (invariant load (s128) from %ir.134, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM13:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 576, 0 :: (invariant load (s128) from %ir.162, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM14:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 224, 0 :: (invariant load (s128) from %ir.140, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM12:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 224, 0 :: (invariant load (s128) from %ir.122, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM13:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 576, 0 :: (invariant load (s128) from %ir.150, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM14:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 224, 0 :: (invariant load (s128) from %ir.128, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN7:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM7]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN8:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM8]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ADD_I32_7:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM4]], -217, implicit-def dead $scc
@@ -135,49 +133,49 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
; CHECK-NEXT: [[S_ADD_I32_12:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -329, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_13:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -345, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_14:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM6]], -441, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_16:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_16:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_15:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_2]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_15:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
; CHECK-NEXT: [[S_LSHL_B32_4:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY13]], 4, implicit-def dead $scc
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN9:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM9]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ASHR_I32_4:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_4]], 31, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_17:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_4]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_17:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_4]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: undef [[S_ADD_U32_16:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_4]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_16:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_4]], implicit-def dead $scc, implicit $scc
; CHECK-NEXT: [[S_LSHL_B32_5:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY5]], 3, implicit-def dead $scc
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN10:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM12]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ASHR_I32_5:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_5]], 31, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_18:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_5]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_18:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_5]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_18]], 168, 0 :: (invariant load (s32) from %ir.273, align 8, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM15:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 576, 0 :: (invariant load (s128) from %ir.157, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_17:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_5]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_17:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_5]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_17]], 168, 0 :: (invariant load (s32) from %ir.260, align 8, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM15:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 576, 0 :: (invariant load (s128) from %ir.145, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN11:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM14]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN12:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM10]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN13:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM11]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub3:sgpr_128 = S_MOV_B32 553734060
; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub2:sgpr_128 = S_MOV_B32 -1
; CHECK-NEXT: [[COPY15:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]]
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM16:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_9]], 0, 0 :: (invariant load (s128) from %ir.170, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM16:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 0, 0 :: (invariant load (s128) from %ir.158, addrspace 4)
; CHECK-NEXT: [[COPY15:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]].sub1
; CHECK-NEXT: [[COPY15:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORD_IMM]]
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY15]], 0, 0 :: (dereferenceable invariant load (s32))
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN14:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM15]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN15:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM13]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM17:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_10]], 0, 0 :: (invariant load (s128) from %ir.178, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM18:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_11]], 0, 0 :: (invariant load (s128) from %ir.183, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM17:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_9]], 0, 0 :: (invariant load (s128) from %ir.166, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM18:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_10]], 0, 0 :: (invariant load (s128) from %ir.171, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN16:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM16]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_LSHL_B32_6:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY4]], 3, implicit-def dead $scc
; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM1]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ASHR_I32_6:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_6]], 31, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_15:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM4]], -467, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_19:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_6]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_19:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_6]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_19]], 168, 0 :: (invariant load (s64) from %ir.282, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_18:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_6]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_18:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_6]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_18]], 168, 0 :: (invariant load (s64) from %ir.269, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM17]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM18]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM19:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_12]], 0, 0 :: (invariant load (s128) from %ir.205, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM20:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_13]], 0, 0 :: (invariant load (s128) from %ir.211, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM19:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_11]], 0, 0 :: (invariant load (s128) from %ir.193, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM20:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_12]], 0, 0 :: (invariant load (s128) from %ir.199, addrspace 4)
; CHECK-NEXT: [[COPY16:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]]
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM21:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_14]], 0, 0 :: (invariant load (s128) from %ir.216, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM22:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_15]], 0, 0 :: (invariant load (s128) from %ir.221, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM21:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_13]], 0, 0 :: (invariant load (s128) from %ir.204, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM22:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_14]], 0, 0 :: (invariant load (s128) from %ir.209, addrspace 4)
; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORDX2_IMM1]].sub1, 65535, implicit-def dead $scc
; CHECK-NEXT: [[COPY16:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
; CHECK-NEXT: [[COPY16:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_]]
@@ -189,30 +187,30 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN20:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM22]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[S_ASHR_I32_7:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_7]], 31, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_16:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM5]], -468, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_20:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_7]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_20:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_7]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM2:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_20]], 168, 0 :: (invariant load (s64) from %ir.293, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_19:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_7]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_19:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_7]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM2:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_19]], 168, 0 :: (invariant load (s64) from %ir.280, addrspace 4)
; CHECK-NEXT: [[COPY17:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]]
; CHECK-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORDX2_IMM2]].sub1, 65535, implicit-def dead $scc
; CHECK-NEXT: [[COPY17:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM2]].sub0
; CHECK-NEXT: [[COPY17:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_1]]
; CHECK-NEXT: [[S_BUFFER_LOAD_DWORD_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY17]], 0, 0 :: (dereferenceable invariant load (s32))
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM23:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_16]], 160, 0 :: (invariant load (s128) from %ir.256, addrspace 4)
- ; CHECK-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %470:sreg_64, 0, 0 :: (invariant load (s32) from `ptr addrspace(4) poison`, addrspace 4)
- ; CHECK-NEXT: KILL [[S_ADD_U32_16]].sub0, [[S_ADD_U32_16]].sub1
- ; CHECK-NEXT: KILL undef %470:sreg_64
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM23:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_15]], 160, 0 :: (invariant load (s128) from %ir.244, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %443:sreg_64, 0, 0 :: (invariant load (s32) from `ptr addrspace(4) poison`, addrspace 4)
+ ; CHECK-NEXT: KILL [[S_ADD_U32_15]].sub0, [[S_ADD_U32_15]].sub1
; CHECK-NEXT: KILL [[COPY17]].sub0_sub1_sub2, [[COPY17]].sub3
+ ; CHECK-NEXT: KILL undef %443:sreg_64
; CHECK-NEXT: [[S_LSHL_B32_8:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY14]], 3, implicit-def dead $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM24:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_17]], 160, 0 :: (invariant load (s128) from %ir.265, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM24:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_16]], 160, 0 :: (invariant load (s128) from %ir.252, addrspace 4)
; CHECK-NEXT: [[S_ASHR_I32_8:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_8]], 31, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_17:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM6]], -469, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_21:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_8]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_21:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_8]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_21]], 168, 0 :: (invariant load (s32) from %ir.305, align 8, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_20:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_8]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_20:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_8]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_20]], 168, 0 :: (invariant load (s32) from %ir.291, align 8, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN21:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM23]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN22:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM24]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM24]]
; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM23]]
+ ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM24]]
; CHECK-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORD_IMM1]], 65535, implicit-def dead $scc
; CHECK-NEXT: [[COPY18:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]]
; CHECK-NEXT: [[COPY18:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_2]]
@@ -224,22 +222,22 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
; CHECK-NEXT: [[S_ADD_I32_21:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -507, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_22:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -539, implicit-def dead $scc
; CHECK-NEXT: [[S_ADD_I32_23:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM7]], -473, implicit-def dead $scc
- ; CHECK-NEXT: undef [[S_ADD_U32_22:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_22:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM25:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_22]], 96, 0 :: (invariant load (s128) from %ir.323, addrspace 4)
- ; CHECK-NEXT: undef [[S_ADD_U32_23:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_1]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_23:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM26:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_23]], 96, 0 :: (invariant load (s128) from %ir.329, addrspace 4)
- ; CHECK-NEXT: undef [[S_ADD_U32_24:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_2]], implicit-def $scc
- ; CHECK-NEXT: [[S_ADD_U32_24:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
- ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM27:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_24]], 96, 0 :: (invariant load (s128) from %ir.335, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_21:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_21:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM25:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_21]], 96, 0 :: (invariant load (s128) from %ir.309, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_22:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_1]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_22:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM26:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_22]], 96, 0 :: (invariant load (s128) from %ir.315, addrspace 4)
+ ; CHECK-NEXT: undef [[S_ADD_U32_23:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_2]], implicit-def $scc
+ ; CHECK-NEXT: [[S_ADD_U32_23:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+ ; CHECK-NEXT: [[S_LOAD_DWORDX4_IMM27:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_23]], 96, 0 :: (invariant load (s128) from %ir.321, addrspace 4)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN23:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM25]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN24:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM26]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
; CHECK-NEXT: [[BUFFER_LOAD_FORMAT_X_IDXEN25:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM27]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM27]]
; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM25]]
- ; CHECK-NEXT: KILL [[V_MOV_B32_e32_]]
; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM26]]
+ ; CHECK-NEXT: KILL [[V_MOV_B32_e32_]]
+ ; CHECK-NEXT: KILL [[S_LOAD_DWORDX4_IMM27]]
; CHECK-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -2, [[BUFFER_LOAD_FORMAT_X_IDXEN]], 0, implicit $exec
; CHECK-NEXT: [[V_ADD_U32_e64_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -1, [[BUFFER_LOAD_FORMAT_X_IDXEN1]], 0, implicit $exec
; CHECK-NEXT: [[V_ADD_U32_e64_2:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -3, [[BUFFER_LOAD_FORMAT_X_IDXEN]], 0, implicit $exec
@@ -351,13 +349,13 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
; CHECK-NEXT: [[V_OR_B32_e64_64:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_OR_B32_e64_63]], [[V_ADD_U32_e64_28]], implicit $exec
; CHECK-NEXT: [[V_ADD_U32_e64_30:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -593, [[BUFFER_LOAD_FORMAT_X_IDXEN]], 0, implicit $exec
; CHECK-NEXT: [[V_OR_B32_e64_65:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_OR_B32_e64_64]], [[V_ADD_U32_e64_29]], implicit $exec
- ; CHECK-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef %543:sreg_64, 0, 0 :: (invariant load (s256) from `ptr addrspace(4) poison`, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef %516:sreg_64, 0, 0 :: (invariant load (s256) from `ptr addrspace(4) poison`, addrspace 4)
; CHECK-NEXT: [[V_OR_B32_e64_66:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_OR_B32_e64_65]], [[V_ADD_U32_e64_30]], implicit $exec
; CHECK-NEXT: [[S_ADD_I32_24:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM8]], -594, implicit-def dead $scc
; CHECK-NEXT: [[V_OR_B32_e64_67:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[S_ADD_I32_24]], [[V_OR_B32_e64_66]], implicit $exec
; CHECK-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 0, [[V_OR_B32_e64_67]], implicit $exec
; CHECK-NEXT: undef [[V_CNDMASK_B32_e64_:%[0-9]+]].sub3:vreg_128 = V_CNDMASK_B32_e64 0, 0, 0, 1, [[V_CMP_EQ_U32_e64_]], implicit $exec
- ; CHECK-NEXT: IMAGE_STORE_V4_V2_nsa_gfx10 [[V_CNDMASK_B32_e64_]], undef %557:vgpr_32, undef %559:vgpr_32, [[S_LOAD_DWORDX8_IMM]], 15, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s128), addrspace 8)
+ ; CHECK-NEXT: IMAGE_STORE_V4_V2_nsa_gfx10 [[V_CNDMASK_B32_e64_]], undef %530:vgpr_32, undef %532:vgpr_32, [[S_LOAD_DWORDX8_IMM]], 15, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s128), addrspace 8)
; CHECK-NEXT: S_ENDPGM 0
.expVert:
%0 = extractelement <31 x i32> %userData, i64 2
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll b/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
index db49339..9c16b3c 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
@@ -22,8 +22,6 @@
; GFX9-DAG: s_mov_b32 s[[DESC3:[0-9]+]], 0xe00000
; OFFREG is offset system SGPR
-; GCN: buffer_store_dword {{v[0-9]+}}, off, s[[[DESC0]]:[[DESC3]]], 0 offset:{{[0-9]+}} ; 4-byte Folded Spill
-; GCN: buffer_load_dword v{{[0-9]+}}, off, s[[[DESC0]]:[[DESC3]]], 0 offset:{{[0-9]+}} ; 4-byte Folded Reload
; GCN: NumVgprs: 256
; GCN: ScratchSize: 640
diff --git a/llvm/test/CodeGen/AMDGPU/wqm.ll b/llvm/test/CodeGen/AMDGPU/wqm.ll
index ad8dcd3..21f0c00 100644
--- a/llvm/test/CodeGen/AMDGPU/wqm.ll
+++ b/llvm/test/CodeGen/AMDGPU/wqm.ll
@@ -3477,13 +3477,10 @@ define amdgpu_gs void @wqm_init_exec_wwm() {
; GFX9-W64-NEXT: s_mov_b64 exec, 0
; GFX9-W64-NEXT: s_mov_b32 s1, 0
; GFX9-W64-NEXT: s_mov_b32 s0, s1
-; GFX9-W64-NEXT: s_cmp_lg_u64 exec, 0
-; GFX9-W64-NEXT: s_cselect_b64 s[2:3], -1, 0
-; GFX9-W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX9-W64-NEXT: s_cmp_eq_u64 s[0:1], 0
; GFX9-W64-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GFX9-W64-NEXT: s_xor_b64 s[0:1], s[2:3], s[0:1]
-; GFX9-W64-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s[0:1]
-; GFX9-W64-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-W64-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-W64-NEXT: v_cndmask_b32_e64 v1, 0, 1.0, s[0:1]
; GFX9-W64-NEXT: exp mrt0 off, off, off, off
; GFX9-W64-NEXT: s_endpgm
;
@@ -3491,14 +3488,11 @@ define amdgpu_gs void @wqm_init_exec_wwm() {
; GFX10-W32: ; %bb.0:
; GFX10-W32-NEXT: s_mov_b32 exec_lo, 0
; GFX10-W32-NEXT: s_mov_b32 s1, 0
-; GFX10-W32-NEXT: s_cmp_lg_u64 exec, 0
+; GFX10-W32-NEXT: v_mov_b32_e32 v0, 0
; GFX10-W32-NEXT: s_mov_b32 s0, s1
-; GFX10-W32-NEXT: s_cselect_b32 s2, -1, 0
-; GFX10-W32-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX10-W32-NEXT: v_mov_b32_e32 v1, 0
+; GFX10-W32-NEXT: s_cmp_eq_u64 s[0:1], 0
; GFX10-W32-NEXT: s_cselect_b32 s0, -1, 0
-; GFX10-W32-NEXT: s_xor_b32 s0, s2, s0
-; GFX10-W32-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s0
+; GFX10-W32-NEXT: v_cndmask_b32_e64 v1, 0, 1.0, s0
; GFX10-W32-NEXT: exp mrt0 off, off, off, off
; GFX10-W32-NEXT: s_endpgm
call void @llvm.amdgcn.init.exec(i64 0)
diff --git a/llvm/test/CodeGen/ARM/strict-fp-func.ll b/llvm/test/CodeGen/ARM/strict-fp-func.ll
new file mode 100644
index 0000000..39bb2b4
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/strict-fp-func.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple arm-none-eabi -stop-after=finalize-isel %s -o - | FileCheck %s
+
+define float @func_02(float %x, float %y) strictfp nounwind {
+ %call = call float @func_01(float %x) strictfp
+ %res = call float @llvm.experimental.constrained.fadd.f32(float %call, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore") strictfp
+ ret float %res
+}
+; CHECK-LABEL: name: func_02
+; CHECK: BL @func_01, {{.*}}, implicit-def $fpscr_rm
+
+
+declare float @func_01(float)
+declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/avg.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/avg.ll
new file mode 100644
index 0000000..2a5a8fa
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/avg.ll
@@ -0,0 +1,307 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+define void @xvavg_b(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrai.b $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <32 x i8>, ptr %a
+ %vb = load <32 x i8>, ptr %b
+ %add = add <32 x i8> %va, %vb
+ %shr = ashr <32 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <32 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_h(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrai.h $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i16>, ptr %a
+ %vb = load <16 x i16>, ptr %b
+ %add = add <16 x i16> %va, %vb
+ %shr = ashr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <16 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_w(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_w:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrai.w $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %add = add <8 x i32> %va, %vb
+ %shr = ashr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ store <8 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_d(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrai.d $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %add = add <4 x i64> %va, %vb
+ %shr = ashr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+ store <4 x i64> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_bu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_bu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrli.b $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <32 x i8>, ptr %a
+ %vb = load <32 x i8>, ptr %b
+ %add = add <32 x i8> %va, %vb
+ %shr = lshr <32 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <32 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_hu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_hu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrli.h $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i16>, ptr %a
+ %vb = load <16 x i16>, ptr %b
+ %add = add <16 x i16> %va, %vb
+ %shr = lshr <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <16 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_wu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_wu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrli.w $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %add = add <8 x i32> %va, %vb
+ %shr = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ store <8 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @xvavg_du(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavg_du:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvsrli.d $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %add = add <4 x i64> %va, %vb
+ %shr = lshr <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+ store <4 x i64> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_b(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.bu $xr0, $xr0, 1
+; CHECK-NEXT: xvsrai.b $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <32 x i8>, ptr %a
+ %vb = load <32 x i8>, ptr %b
+ %add = add <32 x i8> %va, %vb
+ %add1 = add <32 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %shr = ashr <32 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <32 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_h(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.hu $xr0, $xr0, 1
+; CHECK-NEXT: xvsrai.h $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i16>, ptr %a
+ %vb = load <16 x i16>, ptr %b
+ %add = add <16 x i16> %va, %vb
+ %add1 = add <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shr = ashr <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <16 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_w(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_w:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.wu $xr0, $xr0, 1
+; CHECK-NEXT: xvsrai.w $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %add = add <8 x i32> %va, %vb
+ %add1 = add <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shr = ashr <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ store <8 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_d(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.du $xr0, $xr0, 1
+; CHECK-NEXT: xvsrai.d $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %add = add <4 x i64> %va, %vb
+ %add1 = add <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+ %shr = ashr <4 x i64> %add1, <i64 1, i64 1, i64 1, i64 1>
+ store <4 x i64> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_bu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_bu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.b $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.bu $xr0, $xr0, 1
+; CHECK-NEXT: xvsrli.b $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <32 x i8>, ptr %a
+ %vb = load <32 x i8>, ptr %b
+ %add = add <32 x i8> %va, %vb
+ %add1 = add <32 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %shr = lshr <32 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <32 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_hu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_hu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.h $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.hu $xr0, $xr0, 1
+; CHECK-NEXT: xvsrli.h $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i16>, ptr %a
+ %vb = load <16 x i16>, ptr %b
+ %add = add <16 x i16> %va, %vb
+ %add1 = add <16 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shr = lshr <16 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <16 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_wu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_wu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.wu $xr0, $xr0, 1
+; CHECK-NEXT: xvsrli.w $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %add = add <8 x i32> %va, %vb
+ %add1 = add <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %shr = lshr <8 x i32> %add1, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ store <8 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @xvavgr_du(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvavgr_du:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvadd.d $xr0, $xr0, $xr1
+; CHECK-NEXT: xvaddi.du $xr0, $xr0, 1
+; CHECK-NEXT: xvsrli.d $xr0, $xr0, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %add = add <4 x i64> %va, %vb
+ %add1 = add <4 x i64> %add, <i64 1, i64 1, i64 1, i64 1>
+ %shr = lshr <4 x i64> %add1, <i64 1, i64 1, i64 1, i64 1>
+ store <4 x i64> %shr, ptr %res
+ ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/avg.ll b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/avg.ll
new file mode 100644
index 0000000..20b88984
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/ir-instruction/avg.ll
@@ -0,0 +1,307 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+define void @vavg_b(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrai.b $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i8>, ptr %a
+ %vb = load <16 x i8>, ptr %b
+ %add = add <16 x i8> %va, %vb
+ %shr = ashr <16 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <16 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_h(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrai.h $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i16>, ptr %a
+ %vb = load <8 x i16>, ptr %b
+ %add = add <8 x i16> %va, %vb
+ %shr = ashr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <8 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_w(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_w:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrai.w $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i32>, ptr %a
+ %vb = load <4 x i32>, ptr %b
+ %add = add <4 x i32> %va, %vb
+ %shr = ashr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_d(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrai.d $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <2 x i64>, ptr %a
+ %vb = load <2 x i64>, ptr %b
+ %add = add <2 x i64> %va, %vb
+ %shr = ashr <2 x i64> %add, <i64 1, i64 1>
+ store <2 x i64> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_bu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_bu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrli.b $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i8>, ptr %a
+ %vb = load <16 x i8>, ptr %b
+ %add = add <16 x i8> %va, %vb
+ %shr = lshr <16 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <16 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_hu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_hu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrli.h $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i16>, ptr %a
+ %vb = load <8 x i16>, ptr %b
+ %add = add <8 x i16> %va, %vb
+ %shr = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <8 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_wu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_wu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrli.w $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i32>, ptr %a
+ %vb = load <4 x i32>, ptr %b
+ %add = add <4 x i32> %va, %vb
+ %shr = lshr <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @vavg_du(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavg_du:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vsrli.d $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <2 x i64>, ptr %a
+ %vb = load <2 x i64>, ptr %b
+ %add = add <2 x i64> %va, %vb
+ %shr = lshr <2 x i64> %add, <i64 1, i64 1>
+ store <2 x i64> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_b(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.bu $vr0, $vr0, 1
+; CHECK-NEXT: vsrai.b $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i8>, ptr %a
+ %vb = load <16 x i8>, ptr %b
+ %add = add <16 x i8> %va, %vb
+ %add1 = add <16 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %shr = ashr <16 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <16 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_h(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.hu $vr0, $vr0, 1
+; CHECK-NEXT: vsrai.h $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i16>, ptr %a
+ %vb = load <8 x i16>, ptr %b
+ %add = add <8 x i16> %va, %vb
+ %add1 = add <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shr = ashr <8 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <8 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_w(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_w:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.wu $vr0, $vr0, 1
+; CHECK-NEXT: vsrai.w $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i32>, ptr %a
+ %vb = load <4 x i32>, ptr %b
+ %add = add <4 x i32> %va, %vb
+ %add1 = add <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+ %shr = ashr <4 x i32> %add1, <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_d(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.du $vr0, $vr0, 1
+; CHECK-NEXT: vsrai.d $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <2 x i64>, ptr %a
+ %vb = load <2 x i64>, ptr %b
+ %add = add <2 x i64> %va, %vb
+ %add1 = add <2 x i64> %add, <i64 1, i64 1>
+ %shr = ashr <2 x i64> %add1, <i64 1, i64 1>
+ store <2 x i64> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_bu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_bu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.b $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.bu $vr0, $vr0, 1
+; CHECK-NEXT: vsrli.b $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <16 x i8>, ptr %a
+ %vb = load <16 x i8>, ptr %b
+ %add = add <16 x i8> %va, %vb
+ %add1 = add <16 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ %shr = lshr <16 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+ store <16 x i8> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_hu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_hu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.h $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.hu $vr0, $vr0, 1
+; CHECK-NEXT: vsrli.h $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i16>, ptr %a
+ %vb = load <8 x i16>, ptr %b
+ %add = add <8 x i16> %va, %vb
+ %add1 = add <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ %shr = lshr <8 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+ store <8 x i16> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_wu(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_wu:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.w $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.wu $vr0, $vr0, 1
+; CHECK-NEXT: vsrli.w $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i32>, ptr %a
+ %vb = load <4 x i32>, ptr %b
+ %add = add <4 x i32> %va, %vb
+ %add1 = add <4 x i32> %add, <i32 1, i32 1, i32 1, i32 1>
+ %shr = lshr <4 x i32> %add1, <i32 1, i32 1, i32 1, i32 1>
+ store <4 x i32> %shr, ptr %res
+ ret void
+}
+
+define void @vavgr_du(ptr %res, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: vavgr_du:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vld $vr0, $a1, 0
+; CHECK-NEXT: vld $vr1, $a2, 0
+; CHECK-NEXT: vadd.d $vr0, $vr0, $vr1
+; CHECK-NEXT: vaddi.du $vr0, $vr0, 1
+; CHECK-NEXT: vsrli.d $vr0, $vr0, 1
+; CHECK-NEXT: vst $vr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <2 x i64>, ptr %a
+ %vb = load <2 x i64>, ptr %b
+ %add = add <2 x i64> %va, %vb
+ %add1 = add <2 x i64> %add, <i64 1, i64 1>
+ %shr = lshr <2 x i64> %add1, <i64 1, i64 1>
+ store <2 x i64> %shr, ptr %res
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/combine-sext-and-shl-after-isel.ll b/llvm/test/CodeGen/PowerPC/combine-sext-and-shl-after-isel.ll
index 00a77f9..530169f 100644
--- a/llvm/test/CodeGen/PowerPC/combine-sext-and-shl-after-isel.ll
+++ b/llvm/test/CodeGen/PowerPC/combine-sext-and-shl-after-isel.ll
@@ -212,37 +212,33 @@ define hidden void @testCaller(i1 %incond) local_unnamed_addr align 2 nounwind {
; CHECK-NEXT: std r30, 48(r1) # 8-byte Folded Spill
; CHECK-NEXT: andi. r3, r3, 1
; CHECK-NEXT: li r3, -1
+; CHECK-NEXT: li r4, 0
; CHECK-NEXT: li r30, 0
; CHECK-NEXT: crmove 4*cr2+lt, gt
; CHECK-NEXT: std r29, 40(r1) # 8-byte Folded Spill
; CHECK-NEXT: b .LBB3_2
-; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB3_1: # %if.end116
; CHECK-NEXT: #
; CHECK-NEXT: bl callee
; CHECK-NEXT: nop
; CHECK-NEXT: mr r3, r29
-; CHECK-NEXT: .LBB3_2: # %cond.end.i.i
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB3_3 Depth 2
-; CHECK-NEXT: lwz r29, 0(r3)
-; CHECK-NEXT: li r5, 0
-; CHECK-NEXT: extsw r4, r29
-; CHECK-NEXT: .p2align 5
-; CHECK-NEXT: .LBB3_3: # %while.body5.i
-; CHECK-NEXT: # Parent Loop BB3_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
-; CHECK-NEXT: addi r5, r5, -1
-; CHECK-NEXT: cmpwi r5, 0
-; CHECK-NEXT: bgt cr0, .LBB3_3
-; CHECK-NEXT: # %bb.4: # %while.cond12.preheader.i
+; CHECK-NEXT: li r4, 0
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: .LBB3_2: # %while.body5.i
; CHECK-NEXT: #
+; CHECK-NEXT: addi r4, r4, -1
+; CHECK-NEXT: cmpwi r4, 0
+; CHECK-NEXT: bgt cr0, .LBB3_2
+; CHECK-NEXT: # %bb.3: # %while.cond12.preheader.i
+; CHECK-NEXT: #
+; CHECK-NEXT: lwz r29, 0(r3)
; CHECK-NEXT: bc 12, 4*cr2+lt, .LBB3_1
-; CHECK-NEXT: # %bb.5: # %for.cond99.preheader
+; CHECK-NEXT: # %bb.4: # %for.cond99.preheader
; CHECK-NEXT: #
+; CHECK-NEXT: extsw r4, r29
; CHECK-NEXT: ld r5, 0(r3)
-; CHECK-NEXT: sldi r4, r4, 2
; CHECK-NEXT: stw r3, 0(r3)
+; CHECK-NEXT: sldi r4, r4, 2
; CHECK-NEXT: stwx r30, r5, r4
; CHECK-NEXT: b .LBB3_1
;
@@ -256,37 +252,33 @@ define hidden void @testCaller(i1 %incond) local_unnamed_addr align 2 nounwind {
; CHECK-BE-NEXT: std r30, 64(r1) # 8-byte Folded Spill
; CHECK-BE-NEXT: andi. r3, r3, 1
; CHECK-BE-NEXT: li r3, -1
+; CHECK-BE-NEXT: li r4, 0
; CHECK-BE-NEXT: li r30, 0
; CHECK-BE-NEXT: crmove 4*cr2+lt, gt
; CHECK-BE-NEXT: std r29, 56(r1) # 8-byte Folded Spill
; CHECK-BE-NEXT: b .LBB3_2
-; CHECK-BE-NEXT: .p2align 4
; CHECK-BE-NEXT: .LBB3_1: # %if.end116
; CHECK-BE-NEXT: #
; CHECK-BE-NEXT: bl callee
; CHECK-BE-NEXT: nop
; CHECK-BE-NEXT: mr r3, r29
-; CHECK-BE-NEXT: .LBB3_2: # %cond.end.i.i
-; CHECK-BE-NEXT: # =>This Loop Header: Depth=1
-; CHECK-BE-NEXT: # Child Loop BB3_3 Depth 2
-; CHECK-BE-NEXT: lwz r29, 0(r3)
-; CHECK-BE-NEXT: li r5, 0
-; CHECK-BE-NEXT: extsw r4, r29
-; CHECK-BE-NEXT: .p2align 5
-; CHECK-BE-NEXT: .LBB3_3: # %while.body5.i
-; CHECK-BE-NEXT: # Parent Loop BB3_2 Depth=1
-; CHECK-BE-NEXT: # => This Inner Loop Header: Depth=2
-; CHECK-BE-NEXT: addi r5, r5, -1
-; CHECK-BE-NEXT: cmpwi r5, 0
-; CHECK-BE-NEXT: bgt cr0, .LBB3_3
-; CHECK-BE-NEXT: # %bb.4: # %while.cond12.preheader.i
+; CHECK-BE-NEXT: li r4, 0
+; CHECK-BE-NEXT: .p2align 4
+; CHECK-BE-NEXT: .LBB3_2: # %while.body5.i
+; CHECK-BE-NEXT: #
+; CHECK-BE-NEXT: addi r4, r4, -1
+; CHECK-BE-NEXT: cmpwi r4, 0
+; CHECK-BE-NEXT: bgt cr0, .LBB3_2
+; CHECK-BE-NEXT: # %bb.3: # %while.cond12.preheader.i
; CHECK-BE-NEXT: #
+; CHECK-BE-NEXT: lwz r29, 0(r3)
; CHECK-BE-NEXT: bc 12, 4*cr2+lt, .LBB3_1
-; CHECK-BE-NEXT: # %bb.5: # %for.cond99.preheader
+; CHECK-BE-NEXT: # %bb.4: # %for.cond99.preheader
; CHECK-BE-NEXT: #
+; CHECK-BE-NEXT: extsw r4, r29
; CHECK-BE-NEXT: ld r5, 0(r3)
-; CHECK-BE-NEXT: sldi r4, r4, 2
; CHECK-BE-NEXT: stw r3, 0(r3)
+; CHECK-BE-NEXT: sldi r4, r4, 2
; CHECK-BE-NEXT: stwx r30, r5, r4
; CHECK-BE-NEXT: b .LBB3_1
;
@@ -300,32 +292,28 @@ define hidden void @testCaller(i1 %incond) local_unnamed_addr align 2 nounwind {
; CHECK-P9-NEXT: std r0, 80(r1)
; CHECK-P9-NEXT: std r30, 48(r1) # 8-byte Folded Spill
; CHECK-P9-NEXT: li r3, -1
+; CHECK-P9-NEXT: li r4, 0
; CHECK-P9-NEXT: li r30, 0
; CHECK-P9-NEXT: std r29, 40(r1) # 8-byte Folded Spill
; CHECK-P9-NEXT: crmove 4*cr2+lt, gt
; CHECK-P9-NEXT: b .LBB3_2
-; CHECK-P9-NEXT: .p2align 4
; CHECK-P9-NEXT: .LBB3_1: # %if.end116
; CHECK-P9-NEXT: #
; CHECK-P9-NEXT: bl callee
; CHECK-P9-NEXT: nop
; CHECK-P9-NEXT: mr r3, r29
-; CHECK-P9-NEXT: .LBB3_2: # %cond.end.i.i
-; CHECK-P9-NEXT: # =>This Loop Header: Depth=1
-; CHECK-P9-NEXT: # Child Loop BB3_3 Depth 2
-; CHECK-P9-NEXT: lwz r29, 0(r3)
; CHECK-P9-NEXT: li r4, 0
-; CHECK-P9-NEXT: .p2align 5
-; CHECK-P9-NEXT: .LBB3_3: # %while.body5.i
-; CHECK-P9-NEXT: # Parent Loop BB3_2 Depth=1
-; CHECK-P9-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-P9-NEXT: .p2align 4
+; CHECK-P9-NEXT: .LBB3_2: # %while.body5.i
+; CHECK-P9-NEXT: #
; CHECK-P9-NEXT: addi r4, r4, -1
; CHECK-P9-NEXT: cmpwi r4, 0
-; CHECK-P9-NEXT: bgt cr0, .LBB3_3
-; CHECK-P9-NEXT: # %bb.4: # %while.cond12.preheader.i
+; CHECK-P9-NEXT: bgt cr0, .LBB3_2
+; CHECK-P9-NEXT: # %bb.3: # %while.cond12.preheader.i
; CHECK-P9-NEXT: #
+; CHECK-P9-NEXT: lwz r29, 0(r3)
; CHECK-P9-NEXT: bc 12, 4*cr2+lt, .LBB3_1
-; CHECK-P9-NEXT: # %bb.5: # %for.cond99.preheader
+; CHECK-P9-NEXT: # %bb.4: # %for.cond99.preheader
; CHECK-P9-NEXT: #
; CHECK-P9-NEXT: ld r4, 0(r3)
; CHECK-P9-NEXT: extswsli r5, r29, 2
@@ -343,32 +331,28 @@ define hidden void @testCaller(i1 %incond) local_unnamed_addr align 2 nounwind {
; CHECK-P9-BE-NEXT: std r0, 96(r1)
; CHECK-P9-BE-NEXT: std r30, 64(r1) # 8-byte Folded Spill
; CHECK-P9-BE-NEXT: li r3, -1
+; CHECK-P9-BE-NEXT: li r4, 0
; CHECK-P9-BE-NEXT: li r30, 0
; CHECK-P9-BE-NEXT: std r29, 56(r1) # 8-byte Folded Spill
; CHECK-P9-BE-NEXT: crmove 4*cr2+lt, gt
; CHECK-P9-BE-NEXT: b .LBB3_2
-; CHECK-P9-BE-NEXT: .p2align 4
; CHECK-P9-BE-NEXT: .LBB3_1: # %if.end116
; CHECK-P9-BE-NEXT: #
; CHECK-P9-BE-NEXT: bl callee
; CHECK-P9-BE-NEXT: nop
; CHECK-P9-BE-NEXT: mr r3, r29
-; CHECK-P9-BE-NEXT: .LBB3_2: # %cond.end.i.i
-; CHECK-P9-BE-NEXT: # =>This Loop Header: Depth=1
-; CHECK-P9-BE-NEXT: # Child Loop BB3_3 Depth 2
-; CHECK-P9-BE-NEXT: lwz r29, 0(r3)
; CHECK-P9-BE-NEXT: li r4, 0
-; CHECK-P9-BE-NEXT: .p2align 5
-; CHECK-P9-BE-NEXT: .LBB3_3: # %while.body5.i
-; CHECK-P9-BE-NEXT: # Parent Loop BB3_2 Depth=1
-; CHECK-P9-BE-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-P9-BE-NEXT: .p2align 4
+; CHECK-P9-BE-NEXT: .LBB3_2: # %while.body5.i
+; CHECK-P9-BE-NEXT: #
; CHECK-P9-BE-NEXT: addi r4, r4, -1
; CHECK-P9-BE-NEXT: cmpwi r4, 0
-; CHECK-P9-BE-NEXT: bgt cr0, .LBB3_3
-; CHECK-P9-BE-NEXT: # %bb.4: # %while.cond12.preheader.i
+; CHECK-P9-BE-NEXT: bgt cr0, .LBB3_2
+; CHECK-P9-BE-NEXT: # %bb.3: # %while.cond12.preheader.i
; CHECK-P9-BE-NEXT: #
+; CHECK-P9-BE-NEXT: lwz r29, 0(r3)
; CHECK-P9-BE-NEXT: bc 12, 4*cr2+lt, .LBB3_1
-; CHECK-P9-BE-NEXT: # %bb.5: # %for.cond99.preheader
+; CHECK-P9-BE-NEXT: # %bb.4: # %for.cond99.preheader
; CHECK-P9-BE-NEXT: #
; CHECK-P9-BE-NEXT: ld r4, 0(r3)
; CHECK-P9-BE-NEXT: extswsli r5, r29, 2
diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll
index d07f608..c3183a1 100644
--- a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll
@@ -290,9 +290,9 @@ define void @liveConstant() {
; CHECK-NEXT: .half 2
; CHECK-NEXT: .half 0
; CHECK-NEXT: .word
-define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) {
+define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i8 %l25, i16 zeroext %l26, i32 signext %l27) {
entry:
- call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 28, ptr null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27)
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 28, ptr null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i8 %l25, i16 %l26, i32 %l27)
ret void
}
diff --git a/llvm/test/CodeGen/RISCV/rv64p.ll b/llvm/test/CodeGen/RISCV/rv64p.ll
index cb07f94..f937f44 100644
--- a/llvm/test/CodeGen/RISCV/rv64p.ll
+++ b/llvm/test/CodeGen/RISCV/rv64p.ll
@@ -297,8 +297,7 @@ declare i32 @llvm.abs.i32(i32, i1 immarg)
define i32 @abs_i32(i32 %x) {
; CHECK-LABEL: abs_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: sext.w a0, a0
-; CHECK-NEXT: abs a0, a0
+; CHECK-NEXT: absw a0, a0
; CHECK-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
ret i32 %abs
@@ -307,8 +306,7 @@ define i32 @abs_i32(i32 %x) {
define signext i32 @abs_i32_sext(i32 signext %x) {
; CHECK-LABEL: abs_i32_sext:
; CHECK: # %bb.0:
-; CHECK-NEXT: abs a0, a0
-; CHECK-NEXT: sext.w a0, a0
+; CHECK-NEXT: absw a0, a0
; CHECK-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
ret i32 %abs
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
index 9c36bae..ec257bc 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
@@ -6,77 +6,81 @@ define void @arm_min_q31(ptr nocapture readonly %pSrc, i32 %blockSize, ptr nocap
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NEXT: .pad #4
+; CHECK-NEXT: sub sp, #4
; CHECK-NEXT: ldr.w r12, [r0]
; CHECK-NEXT: subs.w r9, r1, #1
; CHECK-NEXT: beq .LBB0_3
; CHECK-NEXT: @ %bb.1: @ %while.body.preheader
-; CHECK-NEXT: and r8, r9, #3
+; CHECK-NEXT: and r6, r9, #3
; CHECK-NEXT: subs r7, r1, #2
; CHECK-NEXT: cmp r7, #3
; CHECK-NEXT: bhs .LBB0_4
; CHECK-NEXT: @ %bb.2:
-; CHECK-NEXT: movs r6, #0
-; CHECK-NEXT: b .LBB0_6
+; CHECK-NEXT: mov.w r10, #0
+; CHECK-NEXT: cbnz r6, .LBB0_7
+; CHECK-NEXT: b .LBB0_10
; CHECK-NEXT: .LBB0_3:
-; CHECK-NEXT: movs r6, #0
+; CHECK-NEXT: mov.w r10, #0
; CHECK-NEXT: b .LBB0_10
; CHECK-NEXT: .LBB0_4: @ %while.body.preheader.new
; CHECK-NEXT: bic r7, r9, #3
-; CHECK-NEXT: movs r6, #1
+; CHECK-NEXT: str r6, [sp] @ 4-byte Spill
; CHECK-NEXT: subs r7, #4
+; CHECK-NEXT: movs r6, #1
+; CHECK-NEXT: mov.w r8, #0
+; CHECK-NEXT: mov.w r10, #0
; CHECK-NEXT: add.w lr, r6, r7, lsr #2
-; CHECK-NEXT: movs r6, #0
-; CHECK-NEXT: movs r7, #4
; CHECK-NEXT: .LBB0_5: @ %while.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ldr r10, [r0, #16]!
-; CHECK-NEXT: sub.w r9, r9, #4
-; CHECK-NEXT: ldrd r5, r4, [r0, #-12]
-; CHECK-NEXT: ldr r11, [r0, #-4]
+; CHECK-NEXT: ldr r11, [r0, #16]!
+; CHECK-NEXT: ldrd r5, r7, [r0, #-12]
+; CHECK-NEXT: ldr r4, [r0, #-4]
; CHECK-NEXT: cmp r12, r5
-; CHECK-NEXT: it gt
-; CHECK-NEXT: subgt r6, r7, #3
; CHECK-NEXT: csel r5, r5, r12, gt
-; CHECK-NEXT: cmp r5, r4
+; CHECK-NEXT: csinc r6, r10, r8, le
+; CHECK-NEXT: cmp r5, r7
; CHECK-NEXT: it gt
-; CHECK-NEXT: subgt r6, r7, #2
-; CHECK-NEXT: csel r5, r4, r5, gt
-; CHECK-NEXT: cmp r5, r11
+; CHECK-NEXT: addgt.w r6, r8, #2
+; CHECK-NEXT: csel r7, r7, r5, gt
+; CHECK-NEXT: cmp r7, r4
; CHECK-NEXT: it gt
-; CHECK-NEXT: subgt r6, r7, #1
-; CHECK-NEXT: csel r5, r11, r5, gt
-; CHECK-NEXT: cmp r5, r10
-; CHECK-NEXT: csel r6, r7, r6, gt
-; CHECK-NEXT: add.w r7, r7, #4
-; CHECK-NEXT: csel r12, r10, r5, gt
+; CHECK-NEXT: addgt.w r6, r8, #3
+; CHECK-NEXT: csel r7, r4, r7, gt
+; CHECK-NEXT: add.w r8, r8, #4
+; CHECK-NEXT: cmp r7, r11
+; CHECK-NEXT: csel r10, r8, r6, gt
+; CHECK-NEXT: csel r12, r11, r7, gt
; CHECK-NEXT: le lr, .LBB0_5
-; CHECK-NEXT: .LBB0_6: @ %while.end.loopexit.unr-lcssa
-; CHECK-NEXT: cmp.w r8, #0
-; CHECK-NEXT: beq .LBB0_10
-; CHECK-NEXT: @ %bb.7: @ %while.body.epil
+; CHECK-NEXT: @ %bb.6: @ %while.end.loopexit.unr-lcssa.loopexit
+; CHECK-NEXT: ldr r6, [sp] @ 4-byte Reload
+; CHECK-NEXT: sub.w r9, r9, r8
+; CHECK-NEXT: cbz r6, .LBB0_10
+; CHECK-NEXT: .LBB0_7: @ %while.body.epil
; CHECK-NEXT: ldr r7, [r0, #4]
; CHECK-NEXT: sub.w r1, r1, r9
; CHECK-NEXT: cmp r12, r7
-; CHECK-NEXT: csel r6, r1, r6, gt
+; CHECK-NEXT: csel r10, r1, r10, gt
; CHECK-NEXT: csel r12, r7, r12, gt
-; CHECK-NEXT: cmp.w r8, #1
+; CHECK-NEXT: cmp r6, #1
; CHECK-NEXT: beq .LBB0_10
; CHECK-NEXT: @ %bb.8: @ %while.body.epil.1
; CHECK-NEXT: ldr r7, [r0, #8]
; CHECK-NEXT: cmp r12, r7
-; CHECK-NEXT: csinc r6, r6, r1, le
+; CHECK-NEXT: csinc r10, r10, r1, le
; CHECK-NEXT: csel r12, r7, r12, gt
-; CHECK-NEXT: cmp.w r8, #2
+; CHECK-NEXT: cmp r6, #2
; CHECK-NEXT: beq .LBB0_10
; CHECK-NEXT: @ %bb.9: @ %while.body.epil.2
; CHECK-NEXT: ldr r0, [r0, #12]
; CHECK-NEXT: cmp r12, r0
; CHECK-NEXT: it gt
-; CHECK-NEXT: addgt r6, r1, #2
+; CHECK-NEXT: addgt.w r10, r1, #2
; CHECK-NEXT: csel r12, r0, r12, gt
; CHECK-NEXT: .LBB0_10: @ %while.end
; CHECK-NEXT: str.w r12, [r2]
-; CHECK-NEXT: str r6, [r3]
+; CHECK-NEXT: str.w r10, [r3]
+; CHECK-NEXT: add sp, #4
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
entry:
%0 = load i32, ptr %pSrc, align 4
diff --git a/llvm/test/CodeGen/X86/bittest-big-integer.ll b/llvm/test/CodeGen/X86/bittest-big-integer.ll
index 19d751d1..cc3dcf3 100644
--- a/llvm/test/CodeGen/X86/bittest-big-integer.ll
+++ b/llvm/test/CodeGen/X86/bittest-big-integer.ll
@@ -203,24 +203,14 @@ define i1 @init_eq_i32(ptr %word, i32 %position, i1 zeroext %value) nounwind {
define i1 @test_ne_i64(ptr %word, i32 %position) nounwind {
; X86-LABEL: test_ne_i64:
; X86: # %bb.0:
-; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $1, %edx
-; X86-NEXT: xorl %esi, %esi
-; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: shll %cl, %edx
-; X86-NEXT: testb $32, %cl
-; X86-NEXT: je .LBB5_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %edx, %esi
-; X86-NEXT: xorl %edx, %edx
-; X86-NEXT: .LBB5_2:
-; X86-NEXT: andl 4(%eax), %esi
-; X86-NEXT: andl (%eax), %edx
-; X86-NEXT: orl %esi, %edx
-; X86-NEXT: setne %al
-; X86-NEXT: popl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: andl $32, %edx
+; X86-NEXT: shrl $3, %edx
+; X86-NEXT: movl (%eax,%edx), %eax
+; X86-NEXT: btl %ecx, %eax
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
; X64-LABEL: test_ne_i64:
@@ -242,38 +232,20 @@ define i1 @test_ne_i64(ptr %word, i32 %position) nounwind {
define i1 @complement_ne_i64(ptr %word, i32 %position) nounwind {
; X86-LABEL: complement_ne_i64:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $1, %eax
-; X86-NEXT: xorl %esi, %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: shll %cl, %eax
-; X86-NEXT: testb $32, %cl
-; X86-NEXT: je .LBB6_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: .LBB6_2:
-; X86-NEXT: movl (%edx), %ecx
-; X86-NEXT: movl 4(%edx), %edi
-; X86-NEXT: movl %edi, %ebx
-; X86-NEXT: andl %esi, %ebx
-; X86-NEXT: movl %ecx, %ebp
-; X86-NEXT: andl %eax, %ebp
-; X86-NEXT: xorl %esi, %edi
-; X86-NEXT: xorl %eax, %ecx
-; X86-NEXT: orl %ebx, %ebp
-; X86-NEXT: setne %al
-; X86-NEXT: movl %ecx, (%edx)
-; X86-NEXT: movl %edi, 4(%edx)
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: andl $32, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setb %al
+; X86-NEXT: btcl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: complement_ne_i64:
@@ -300,40 +272,20 @@ define i1 @complement_ne_i64(ptr %word, i32 %position) nounwind {
define i1 @reset_eq_i64(ptr %word, i32 %position) nounwind {
; X86-LABEL: reset_eq_i64:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $1, %esi
-; X86-NEXT: xorl %edi, %edi
-; X86-NEXT: shldl %cl, %esi, %edi
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: testb $32, %cl
-; X86-NEXT: je .LBB7_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: xorl %esi, %esi
-; X86-NEXT: .LBB7_2:
-; X86-NEXT: movl (%edx), %eax
-; X86-NEXT: movl 4(%edx), %ecx
-; X86-NEXT: movl %ecx, %ebx
-; X86-NEXT: andl %edi, %ebx
-; X86-NEXT: notl %edi
-; X86-NEXT: movl %eax, %ebp
-; X86-NEXT: andl %esi, %ebp
-; X86-NEXT: notl %esi
-; X86-NEXT: andl %ecx, %edi
-; X86-NEXT: andl %eax, %esi
-; X86-NEXT: orl %ebx, %ebp
-; X86-NEXT: sete %al
-; X86-NEXT: movl %esi, (%edx)
-; X86-NEXT: movl %edi, 4(%edx)
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: andl $32, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setae %al
+; X86-NEXT: btrl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: reset_eq_i64:
@@ -361,38 +313,20 @@ define i1 @reset_eq_i64(ptr %word, i32 %position) nounwind {
define i1 @set_ne_i64(ptr %word, i32 %position) nounwind {
; X86-LABEL: set_ne_i64:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $1, %eax
-; X86-NEXT: xorl %esi, %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: shll %cl, %eax
-; X86-NEXT: testb $32, %cl
-; X86-NEXT: je .LBB8_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: .LBB8_2:
-; X86-NEXT: movl (%edx), %ecx
-; X86-NEXT: movl 4(%edx), %edi
-; X86-NEXT: movl %edi, %ebx
-; X86-NEXT: andl %esi, %ebx
-; X86-NEXT: movl %ecx, %ebp
-; X86-NEXT: andl %eax, %ebp
-; X86-NEXT: orl %esi, %edi
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: orl %ebx, %ebp
-; X86-NEXT: setne %al
-; X86-NEXT: movl %ecx, (%edx)
-; X86-NEXT: movl %edi, 4(%edx)
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: andl $32, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setb %al
+; X86-NEXT: btsl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: set_ne_i64:
@@ -419,52 +353,47 @@ define i1 @set_ne_i64(ptr %word, i32 %position) nounwind {
define i1 @init_eq_i64(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; X86-LABEL: init_eq_i64:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $1, %eax
-; X86-NEXT: xorl %edx, %edx
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: shll %cl, %eax
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl $1, %edx
+; X86-NEXT: xorl %esi, %esi
+; X86-NEXT: shldl %cl, %edx, %esi
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: xorl %edi, %edi
-; X86-NEXT: shldl %cl, %esi, %edi
-; X86-NEXT: shll %cl, %esi
+; X86-NEXT: shldl %cl, %eax, %edi
+; X86-NEXT: shll %cl, %eax
; X86-NEXT: testb $32, %cl
; X86-NEXT: je .LBB9_2
; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: movl $0, %eax
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: movl $0, %edx
; X86-NEXT: .LBB9_2:
-; X86-NEXT: movl %edx, %ebx
-; X86-NEXT: notl %ebx
-; X86-NEXT: movl %eax, %ebp
-; X86-NEXT: notl %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: notl %esi
+; X86-NEXT: notl %edx
; X86-NEXT: je .LBB9_4
; X86-NEXT: # %bb.3:
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: xorl %esi, %esi
+; X86-NEXT: movl %eax, %edi
+; X86-NEXT: xorl %eax, %eax
; X86-NEXT: .LBB9_4:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl 4(%ecx), %ecx
-; X86-NEXT: andl %ecx, %edx
-; X86-NEXT: andl %ecx, %ebx
-; X86-NEXT: orl %edi, %ebx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movl (%edi), %ecx
-; X86-NEXT: andl %ecx, %eax
-; X86-NEXT: andl %ecx, %ebp
-; X86-NEXT: orl %esi, %ebp
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: movl %ebp, (%edi)
-; X86-NEXT: movl %ebx, 4(%edi)
-; X86-NEXT: sete %al
+; X86-NEXT: andl 4(%ebx), %esi
+; X86-NEXT: orl %edi, %esi
+; X86-NEXT: andl (%ebx), %edx
+; X86-NEXT: orl %eax, %edx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: andl $32, %eax
+; X86-NEXT: shrl $3, %eax
+; X86-NEXT: movl (%ebx,%eax), %eax
+; X86-NEXT: btl %ecx, %eax
+; X86-NEXT: setae %al
+; X86-NEXT: movl %esi, 4(%ebx)
+; X86-NEXT: movl %edx, (%ebx)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; SSE-LABEL: init_eq_i64:
@@ -516,101 +445,25 @@ define i1 @init_eq_i64(ptr %word, i32 %position, i1 zeroext %value) nounwind {
define i1 @test_ne_i128(ptr %word, i32 %position) nounwind {
; X86-LABEL: test_ne_i128:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $48, %esp
-; X86-NEXT: movzbl 12(%ebp), %ecx
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, (%esp)
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrb $3, %al
-; X86-NEXT: andb $12, %al
-; X86-NEXT: negb %al
-; X86-NEXT: movsbl %al, %esi
-; X86-NEXT: movl 24(%esp,%esi), %edi
-; X86-NEXT: movl 28(%esp,%esi), %eax
-; X86-NEXT: shldl %cl, %edi, %eax
-; X86-NEXT: movl 16(%esp,%esi), %edx
-; X86-NEXT: movl 20(%esp,%esi), %esi
-; X86-NEXT: shldl %cl, %esi, %edi
-; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: movl 8(%ebp), %ebx
-; X86-NEXT: shll %cl, %edx
-; X86-NEXT: andl 8(%ebx), %edi
-; X86-NEXT: andl (%ebx), %edx
-; X86-NEXT: orl %edi, %edx
-; X86-NEXT: andl 12(%ebx), %eax
-; X86-NEXT: andl 4(%ebx), %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: orl %edx, %esi
-; X86-NEXT: setne %al
-; X86-NEXT: leal -12(%ebp), %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: andl $96, %edx
+; X86-NEXT: shrl $3, %edx
+; X86-NEXT: movl (%eax,%edx), %eax
+; X86-NEXT: btl %ecx, %eax
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
-; SSE-LABEL: test_ne_i128:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: movl $1, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: shldq %cl, %rax, %rdx
-; SSE-NEXT: xorl %esi, %esi
-; SSE-NEXT: shlq %cl, %rax
-; SSE-NEXT: testb $64, %cl
-; SSE-NEXT: cmovneq %rax, %rdx
-; SSE-NEXT: cmovneq %rsi, %rax
-; SSE-NEXT: andq 8(%rdi), %rdx
-; SSE-NEXT: andq (%rdi), %rax
-; SSE-NEXT: orq %rdx, %rax
-; SSE-NEXT: setne %al
-; SSE-NEXT: retq
-;
-; AVX2-LABEL: test_ne_i128:
-; AVX2: # %bb.0:
-; AVX2-NEXT: movl %esi, %ecx
-; AVX2-NEXT: xorl %eax, %eax
-; AVX2-NEXT: movl $1, %edx
-; AVX2-NEXT: xorl %esi, %esi
-; AVX2-NEXT: shldq %cl, %rdx, %rsi
-; AVX2-NEXT: shlxq %rcx, %rdx, %rdx
-; AVX2-NEXT: testb $64, %cl
-; AVX2-NEXT: cmovneq %rdx, %rsi
-; AVX2-NEXT: cmovneq %rax, %rdx
-; AVX2-NEXT: andq 8(%rdi), %rsi
-; AVX2-NEXT: andq (%rdi), %rdx
-; AVX2-NEXT: orq %rsi, %rdx
-; AVX2-NEXT: setne %al
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_ne_i128:
-; AVX512: # %bb.0:
-; AVX512-NEXT: movl %esi, %ecx
-; AVX512-NEXT: movl $1, %eax
-; AVX512-NEXT: xorl %edx, %edx
-; AVX512-NEXT: shldq %cl, %rax, %rdx
-; AVX512-NEXT: xorl %esi, %esi
-; AVX512-NEXT: shlxq %rcx, %rax, %rax
-; AVX512-NEXT: testb $64, %cl
-; AVX512-NEXT: cmovneq %rax, %rdx
-; AVX512-NEXT: cmovneq %rsi, %rax
-; AVX512-NEXT: andq 8(%rdi), %rdx
-; AVX512-NEXT: andq (%rdi), %rax
-; AVX512-NEXT: orq %rdx, %rax
-; AVX512-NEXT: setne %al
-; AVX512-NEXT: retq
+; X64-LABEL: test_ne_i128:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: andl $96, %eax
+; X64-NEXT: shrl $3, %eax
+; X64-NEXT: movl (%rdi,%rax), %eax
+; X64-NEXT: btl %esi, %eax
+; X64-NEXT: setb %al
+; X64-NEXT: retq
%rem = and i32 %position, 127
%ofs = zext nneg i32 %rem to i128
%bit = shl nuw i128 1, %ofs
@@ -623,124 +476,33 @@ define i1 @test_ne_i128(ptr %word, i32 %position) nounwind {
define i1 @complement_ne_i128(ptr %word, i32 %position) nounwind {
; X86-LABEL: complement_ne_i128:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $80, %esp
-; X86-NEXT: movzbl 12(%ebp), %ecx
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrb $3, %al
-; X86-NEXT: andb $12, %al
-; X86-NEXT: negb %al
-; X86-NEXT: movsbl %al, %eax
-; X86-NEXT: movl 56(%esp,%eax), %esi
-; X86-NEXT: movl 60(%esp,%eax), %edx
-; X86-NEXT: shldl %cl, %esi, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 48(%esp,%eax), %edi
-; X86-NEXT: movl 52(%esp,%eax), %ebx
-; X86-NEXT: shldl %cl, %ebx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, %ebx
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: shll %cl, %edi
-; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: movl 8(%eax), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %eax
-; X86-NEXT: movl (%ecx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ecx, %esi
-; X86-NEXT: movl %edx, %ecx
-; X86-NEXT: andl %edi, %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl 12(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 4(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %eax
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: movl %edx, 8(%eax)
-; X86-NEXT: movl %esi, 12(%eax)
-; X86-NEXT: movl %edi, (%eax)
-; X86-NEXT: movl %ebx, 4(%eax)
-; X86-NEXT: setne %al
-; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: andl $96, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setb %al
+; X86-NEXT: btcl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
-; SSE-LABEL: complement_ne_i128:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: movl $1, %edx
-; SSE-NEXT: xorl %esi, %esi
-; SSE-NEXT: shldq %cl, %rdx, %rsi
-; SSE-NEXT: shlq %cl, %rdx
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: testb $64, %cl
-; SSE-NEXT: cmovneq %rdx, %rsi
-; SSE-NEXT: cmovneq %rax, %rdx
-; SSE-NEXT: movq (%rdi), %rax
-; SSE-NEXT: movq 8(%rdi), %rcx
-; SSE-NEXT: movq %rcx, %r8
-; SSE-NEXT: andq %rsi, %r8
-; SSE-NEXT: movq %rax, %r9
-; SSE-NEXT: andq %rdx, %r9
-; SSE-NEXT: xorq %rcx, %rsi
-; SSE-NEXT: xorq %rax, %rdx
-; SSE-NEXT: orq %r8, %r9
-; SSE-NEXT: setne %al
-; SSE-NEXT: movq %rdx, (%rdi)
-; SSE-NEXT: movq %rsi, 8(%rdi)
-; SSE-NEXT: retq
-;
-; AVX-LABEL: complement_ne_i128:
-; AVX: # %bb.0:
-; AVX-NEXT: movl %esi, %ecx
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: movl $1, %edx
-; AVX-NEXT: xorl %esi, %esi
-; AVX-NEXT: shldq %cl, %rdx, %rsi
-; AVX-NEXT: shlxq %rcx, %rdx, %rdx
-; AVX-NEXT: testb $64, %cl
-; AVX-NEXT: cmovneq %rdx, %rsi
-; AVX-NEXT: cmovneq %rax, %rdx
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: movq 8(%rdi), %rcx
-; AVX-NEXT: movq %rcx, %r8
-; AVX-NEXT: andq %rsi, %r8
-; AVX-NEXT: movq %rax, %r9
-; AVX-NEXT: andq %rdx, %r9
-; AVX-NEXT: xorq %rcx, %rsi
-; AVX-NEXT: xorq %rax, %rdx
-; AVX-NEXT: orq %r8, %r9
-; AVX-NEXT: setne %al
-; AVX-NEXT: movq %rdx, (%rdi)
-; AVX-NEXT: movq %rsi, 8(%rdi)
-; AVX-NEXT: retq
+; X64-LABEL: complement_ne_i128:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: andl $96, %ecx
+; X64-NEXT: shrl $3, %ecx
+; X64-NEXT: movl (%rdi,%rcx), %edx
+; X64-NEXT: btl %esi, %edx
+; X64-NEXT: setb %al
+; X64-NEXT: btcl %esi, %edx
+; X64-NEXT: movl %edx, (%rdi,%rcx)
+; X64-NEXT: retq
%rem = and i32 %position, 127
%ofs = zext nneg i32 %rem to i128
%bit = shl nuw i128 1, %ofs
@@ -755,124 +517,33 @@ define i1 @complement_ne_i128(ptr %word, i32 %position) nounwind {
define i1 @reset_eq_i128(ptr %word, i32 %position) nounwind {
; X86-LABEL: reset_eq_i128:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $80, %esp
-; X86-NEXT: movzbl 12(%ebp), %ecx
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrb $3, %al
-; X86-NEXT: andb $12, %al
-; X86-NEXT: negb %al
-; X86-NEXT: movsbl %al, %eax
-; X86-NEXT: movl 56(%esp,%eax), %edx
-; X86-NEXT: movl 60(%esp,%eax), %esi
-; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 48(%esp,%eax), %esi
-; X86-NEXT: movl 52(%esp,%eax), %edi
-; X86-NEXT: shldl %cl, %edi, %edx
-; X86-NEXT: shldl %cl, %esi, %edi
-; X86-NEXT: movl 8(%ebp), %ebx
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: movl 8(%ebx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edx, %eax
-; X86-NEXT: movl (%ebx), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%ebx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl %edi, %ecx
-; X86-NEXT: movl 4(%ebx), %ebx
-; X86-NEXT: andl %ebx, %edi
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: notl %eax
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: notl %ecx
-; X86-NEXT: andl %ebx, %ecx
-; X86-NEXT: notl %esi
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: movl 8(%ebp), %edi
-; X86-NEXT: movl %edx, 8(%edi)
-; X86-NEXT: movl %eax, 12(%edi)
-; X86-NEXT: movl %esi, (%edi)
-; X86-NEXT: movl %ecx, 4(%edi)
-; X86-NEXT: sete %al
-; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: andl $96, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setae %al
+; X86-NEXT: btrl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
-; SSE-LABEL: reset_eq_i128:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: movl $1, %edx
-; SSE-NEXT: xorl %esi, %esi
-; SSE-NEXT: shldq %cl, %rdx, %rsi
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: shlq %cl, %rdx
-; SSE-NEXT: testb $64, %cl
-; SSE-NEXT: cmovneq %rdx, %rsi
-; SSE-NEXT: cmovneq %rax, %rdx
-; SSE-NEXT: movq (%rdi), %rax
-; SSE-NEXT: movq 8(%rdi), %rcx
-; SSE-NEXT: movq %rcx, %r8
-; SSE-NEXT: andq %rsi, %r8
-; SSE-NEXT: notq %rsi
-; SSE-NEXT: movq %rax, %r9
-; SSE-NEXT: andq %rdx, %r9
-; SSE-NEXT: notq %rdx
-; SSE-NEXT: andq %rcx, %rsi
-; SSE-NEXT: andq %rax, %rdx
-; SSE-NEXT: orq %r8, %r9
-; SSE-NEXT: sete %al
-; SSE-NEXT: movq %rdx, (%rdi)
-; SSE-NEXT: movq %rsi, 8(%rdi)
-; SSE-NEXT: retq
-;
-; AVX-LABEL: reset_eq_i128:
-; AVX: # %bb.0:
-; AVX-NEXT: movl %esi, %ecx
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: movl $1, %edx
-; AVX-NEXT: xorl %esi, %esi
-; AVX-NEXT: shldq %cl, %rdx, %rsi
-; AVX-NEXT: shlxq %rcx, %rdx, %rdx
-; AVX-NEXT: testb $64, %cl
-; AVX-NEXT: cmovneq %rdx, %rsi
-; AVX-NEXT: cmovneq %rax, %rdx
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: movq 8(%rdi), %rcx
-; AVX-NEXT: andnq %rcx, %rsi, %r8
-; AVX-NEXT: andq %rsi, %rcx
-; AVX-NEXT: andnq %rax, %rdx, %rsi
-; AVX-NEXT: andq %rdx, %rax
-; AVX-NEXT: orq %rcx, %rax
-; AVX-NEXT: sete %al
-; AVX-NEXT: movq %rsi, (%rdi)
-; AVX-NEXT: movq %r8, 8(%rdi)
-; AVX-NEXT: retq
+; X64-LABEL: reset_eq_i128:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: andl $96, %ecx
+; X64-NEXT: shrl $3, %ecx
+; X64-NEXT: movl (%rdi,%rcx), %edx
+; X64-NEXT: btl %esi, %edx
+; X64-NEXT: setae %al
+; X64-NEXT: btrl %esi, %edx
+; X64-NEXT: movl %edx, (%rdi,%rcx)
+; X64-NEXT: retq
%rem = and i32 %position, 127
%ofs = zext nneg i32 %rem to i128
%bit = shl nuw i128 1, %ofs
@@ -888,124 +559,33 @@ define i1 @reset_eq_i128(ptr %word, i32 %position) nounwind {
define i1 @set_ne_i128(ptr %word, i32 %position) nounwind {
; X86-LABEL: set_ne_i128:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $80, %esp
-; X86-NEXT: movzbl 12(%ebp), %ecx
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrb $3, %al
-; X86-NEXT: andb $12, %al
-; X86-NEXT: negb %al
-; X86-NEXT: movsbl %al, %eax
-; X86-NEXT: movl 56(%esp,%eax), %esi
-; X86-NEXT: movl 60(%esp,%eax), %edx
-; X86-NEXT: shldl %cl, %esi, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 48(%esp,%eax), %edi
-; X86-NEXT: movl 52(%esp,%eax), %ebx
-; X86-NEXT: shldl %cl, %ebx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, %ebx
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: shll %cl, %edi
-; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: movl 8(%eax), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %eax
-; X86-NEXT: movl (%ecx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ecx, %esi
-; X86-NEXT: movl %edx, %ecx
-; X86-NEXT: andl %edi, %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl 12(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 4(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %eax
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: movl %edx, 8(%eax)
-; X86-NEXT: movl %esi, 12(%eax)
-; X86-NEXT: movl %edi, (%eax)
-; X86-NEXT: movl %ebx, 4(%eax)
-; X86-NEXT: setne %al
-; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: andl $96, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setb %al
+; X86-NEXT: btsl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
-; SSE-LABEL: set_ne_i128:
-; SSE: # %bb.0:
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: movl $1, %edx
-; SSE-NEXT: xorl %esi, %esi
-; SSE-NEXT: shldq %cl, %rdx, %rsi
-; SSE-NEXT: shlq %cl, %rdx
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: testb $64, %cl
-; SSE-NEXT: cmovneq %rdx, %rsi
-; SSE-NEXT: cmovneq %rax, %rdx
-; SSE-NEXT: movq (%rdi), %rax
-; SSE-NEXT: movq 8(%rdi), %rcx
-; SSE-NEXT: movq %rcx, %r8
-; SSE-NEXT: andq %rsi, %r8
-; SSE-NEXT: movq %rax, %r9
-; SSE-NEXT: andq %rdx, %r9
-; SSE-NEXT: orq %rcx, %rsi
-; SSE-NEXT: orq %rax, %rdx
-; SSE-NEXT: orq %r8, %r9
-; SSE-NEXT: setne %al
-; SSE-NEXT: movq %rdx, (%rdi)
-; SSE-NEXT: movq %rsi, 8(%rdi)
-; SSE-NEXT: retq
-;
-; AVX-LABEL: set_ne_i128:
-; AVX: # %bb.0:
-; AVX-NEXT: movl %esi, %ecx
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: movl $1, %edx
-; AVX-NEXT: xorl %esi, %esi
-; AVX-NEXT: shldq %cl, %rdx, %rsi
-; AVX-NEXT: shlxq %rcx, %rdx, %rdx
-; AVX-NEXT: testb $64, %cl
-; AVX-NEXT: cmovneq %rdx, %rsi
-; AVX-NEXT: cmovneq %rax, %rdx
-; AVX-NEXT: movq (%rdi), %rax
-; AVX-NEXT: movq 8(%rdi), %rcx
-; AVX-NEXT: movq %rcx, %r8
-; AVX-NEXT: andq %rsi, %r8
-; AVX-NEXT: movq %rax, %r9
-; AVX-NEXT: andq %rdx, %r9
-; AVX-NEXT: orq %rcx, %rsi
-; AVX-NEXT: orq %rax, %rdx
-; AVX-NEXT: orq %r8, %r9
-; AVX-NEXT: setne %al
-; AVX-NEXT: movq %rdx, (%rdi)
-; AVX-NEXT: movq %rsi, 8(%rdi)
-; AVX-NEXT: retq
+; X64-LABEL: set_ne_i128:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: andl $96, %ecx
+; X64-NEXT: shrl $3, %ecx
+; X64-NEXT: movl (%rdi,%rcx), %edx
+; X64-NEXT: btl %esi, %edx
+; X64-NEXT: setb %al
+; X64-NEXT: btsl %esi, %edx
+; X64-NEXT: movl %edx, (%rdi,%rcx)
+; X64-NEXT: retq
%rem = and i32 %position, 127
%ofs = zext nneg i32 %rem to i128
%bit = shl nuw i128 1, %ofs
@@ -1026,9 +606,9 @@ define i1 @init_eq_i128(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $128, %esp
-; X86-NEXT: movzbl 12(%ebp), %ecx
-; X86-NEXT: movzbl 16(%ebp), %eax
+; X86-NEXT: subl $96, %esp
+; X86-NEXT: movl 12(%ebp), %ecx
+; X86-NEXT: movzbl 16(%ebp), %ebx
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
@@ -1037,25 +617,29 @@ define i1 @init_eq_i128(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ecx, %edx
-; X86-NEXT: shrb $3, %dl
-; X86-NEXT: andb $12, %dl
-; X86-NEXT: negb %dl
-; X86-NEXT: movsbl %dl, %esi
-; X86-NEXT: movl 64(%esp,%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 68(%esp,%esi), %edx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: shrb $3, %al
+; X86-NEXT: andb $12, %al
+; X86-NEXT: negb %al
+; X86-NEXT: movsbl %al, %edi
+; X86-NEXT: movl 72(%esp,%edi), %edx
+; X86-NEXT: movl 76(%esp,%edi), %esi
+; X86-NEXT: movzbl %bl, %eax
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 64(%esp,%edi), %ebx
+; X86-NEXT: movl %ebx, (%esp) # 4-byte Spill
+; X86-NEXT: movl 68(%esp,%edi), %ebx
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: shldl %cl, %edx, %esi
+; X86-NEXT: shldl %cl, %ebx, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 72(%esp,%esi), %ebx
+; X86-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-NEXT: shldl %cl, %eax, %ebx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT: notl %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movzbl %al, %eax
-; X86-NEXT: movl 76(%esp,%esi), %edi
-; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT: movl %ebx, %eax
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: shldl %cl, %ebx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shll %cl, %edx
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
@@ -1063,72 +647,53 @@ define i1 @init_eq_i128(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 8(%ebp), %esi
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%esi), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %eax
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%esi), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %edi, %esi
+; X86-NEXT: movl 40(%esp,%eax), %edi
+; X86-NEXT: movl 44(%esp,%eax), %esi
+; X86-NEXT: movl 12(%ebp), %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shldl %cl, %edi, %esi
; X86-NEXT: movl 8(%ebp), %ecx
-; X86-NEXT: movl 12(%ecx), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %edi
-; X86-NEXT: movl %eax, %ebx
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: movl 4(%ecx), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ecx, %ebx
-; X86-NEXT: orl %edi, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: notl %ecx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl 100(%esp,%ecx), %edi
-; X86-NEXT: movl 104(%esp,%ecx), %ecx
-; X86-NEXT: movl %ecx, %ebx
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: movzbl 12(%ebp), %ecx
-; X86-NEXT: shldl %cl, %edi, %ebx
-; X86-NEXT: orl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: notl %esi
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: movl 108(%esp,%ebx), %ebx
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: orl %ebx, %esi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: andl 12(%ecx), %eax
+; X86-NEXT: orl %esi, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: notl %eax
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: movl 96(%esp,%ebx), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shll %cl, %ebx
-; X86-NEXT: orl %ebx, %eax
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: movl 36(%esp,%esi), %esi
+; X86-NEXT: movl 12(%ebp), %ecx
+; X86-NEXT: shldl %cl, %esi, %edi
+; X86-NEXT: movl 8(%ebp), %edx
+; X86-NEXT: andl 8(%edx), %eax
+; X86-NEXT: orl %edi, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movl 32(%esp,%eax), %eax
+; X86-NEXT: shldl %cl, %eax, %esi
+; X86-NEXT: movl 8(%ebp), %edi
+; X86-NEXT: andl 4(%edi), %ebx
+; X86-NEXT: orl %esi, %ebx
+; X86-NEXT: movl (%esp), %edx # 4-byte Reload
; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: shldl %cl, %ebx, %edi
-; X86-NEXT: orl %edi, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl 8(%ebp), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 8(%ecx)
-; X86-NEXT: movl %esi, 12(%ecx)
-; X86-NEXT: movl %eax, (%ecx)
-; X86-NEXT: movl %edx, 4(%ecx)
-; X86-NEXT: sete %al
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: andl (%edi), %edx
+; X86-NEXT: orl %eax, %edx
+; X86-NEXT: movl 12(%ebp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: andl $96, %eax
+; X86-NEXT: shrl $3, %eax
+; X86-NEXT: movl (%edi,%eax), %eax
+; X86-NEXT: btl %ecx, %eax
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movl %eax, 12(%edi)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movl %eax, 8(%edi)
+; X86-NEXT: movl %ebx, 4(%edi)
+; X86-NEXT: movl %edx, (%edi)
+; X86-NEXT: setae %al
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
@@ -1151,86 +716,84 @@ define i1 @init_eq_i128(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; SSE-NEXT: testb $64, %cl
; SSE-NEXT: cmovneq %rsi, %r8
; SSE-NEXT: cmovneq %r9, %rsi
+; SSE-NEXT: notq %r8
; SSE-NEXT: cmovneq %rax, %rdx
; SSE-NEXT: cmovneq %r9, %rax
-; SSE-NEXT: movq (%rdi), %rcx
-; SSE-NEXT: movq 8(%rdi), %r9
-; SSE-NEXT: movq %r9, %r10
-; SSE-NEXT: andq %r8, %r10
-; SSE-NEXT: notq %r8
-; SSE-NEXT: movq %rcx, %r11
-; SSE-NEXT: andq %rsi, %r11
; SSE-NEXT: notq %rsi
-; SSE-NEXT: andq %r9, %r8
+; SSE-NEXT: andq 8(%rdi), %r8
; SSE-NEXT: orq %rdx, %r8
-; SSE-NEXT: andq %rcx, %rsi
+; SSE-NEXT: andq (%rdi), %rsi
; SSE-NEXT: orq %rax, %rsi
-; SSE-NEXT: orq %r10, %r11
-; SSE-NEXT: sete %al
-; SSE-NEXT: movq %rsi, (%rdi)
+; SSE-NEXT: movl %ecx, %eax
+; SSE-NEXT: andl $96, %eax
+; SSE-NEXT: shrl $3, %eax
+; SSE-NEXT: movl (%rdi,%rax), %eax
+; SSE-NEXT: btl %ecx, %eax
+; SSE-NEXT: setae %al
; SSE-NEXT: movq %r8, 8(%rdi)
+; SSE-NEXT: movq %rsi, (%rdi)
; SSE-NEXT: retq
;
; AVX2-LABEL: init_eq_i128:
; AVX2: # %bb.0:
; AVX2-NEXT: movl %esi, %ecx
-; AVX2-NEXT: movl $1, %esi
-; AVX2-NEXT: xorl %eax, %eax
-; AVX2-NEXT: shldq %cl, %rsi, %rax
-; AVX2-NEXT: xorl %r8d, %r8d
+; AVX2-NEXT: movl $1, %eax
+; AVX2-NEXT: xorl %esi, %esi
+; AVX2-NEXT: shldq %cl, %rax, %rsi
; AVX2-NEXT: movl %edx, %edx
+; AVX2-NEXT: xorl %r8d, %r8d
+; AVX2-NEXT: shldq %cl, %rdx, %r8
; AVX2-NEXT: xorl %r9d, %r9d
-; AVX2-NEXT: shldq %cl, %rdx, %r9
-; AVX2-NEXT: shlxq %rcx, %rsi, %rsi
+; AVX2-NEXT: shlxq %rcx, %rax, %rax
; AVX2-NEXT: testb $64, %cl
-; AVX2-NEXT: cmovneq %rsi, %rax
-; AVX2-NEXT: cmovneq %r8, %rsi
-; AVX2-NEXT: shlxq %rcx, %rdx, %rcx
-; AVX2-NEXT: cmovneq %rcx, %r9
-; AVX2-NEXT: cmovneq %r8, %rcx
-; AVX2-NEXT: movq (%rdi), %rdx
-; AVX2-NEXT: movq 8(%rdi), %r8
-; AVX2-NEXT: andnq %r8, %rax, %r10
-; AVX2-NEXT: andq %rax, %r8
-; AVX2-NEXT: andnq %rdx, %rsi, %r11
-; AVX2-NEXT: andq %rsi, %rdx
-; AVX2-NEXT: orq %r9, %r10
-; AVX2-NEXT: orq %rcx, %r11
-; AVX2-NEXT: orq %r8, %rdx
-; AVX2-NEXT: sete %al
-; AVX2-NEXT: movq %r11, (%rdi)
-; AVX2-NEXT: movq %r10, 8(%rdi)
+; AVX2-NEXT: cmovneq %rax, %rsi
+; AVX2-NEXT: cmovneq %r9, %rax
+; AVX2-NEXT: shlxq %rcx, %rdx, %rdx
+; AVX2-NEXT: cmovneq %rdx, %r8
+; AVX2-NEXT: cmovneq %r9, %rdx
+; AVX2-NEXT: andnq 8(%rdi), %rsi, %rsi
+; AVX2-NEXT: orq %r8, %rsi
+; AVX2-NEXT: andnq (%rdi), %rax, %r8
+; AVX2-NEXT: orq %rdx, %r8
+; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: andl $96, %eax
+; AVX2-NEXT: shrl $3, %eax
+; AVX2-NEXT: movl (%rdi,%rax), %eax
+; AVX2-NEXT: btl %ecx, %eax
+; AVX2-NEXT: setae %al
+; AVX2-NEXT: movq %rsi, 8(%rdi)
+; AVX2-NEXT: movq %r8, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: init_eq_i128:
; AVX512: # %bb.0:
; AVX512-NEXT: movl %esi, %ecx
-; AVX512-NEXT: xorl %eax, %eax
-; AVX512-NEXT: movl $1, %esi
+; AVX512-NEXT: movl $1, %eax
+; AVX512-NEXT: xorl %esi, %esi
+; AVX512-NEXT: shldq %cl, %rax, %rsi
; AVX512-NEXT: xorl %r8d, %r8d
-; AVX512-NEXT: shldq %cl, %rsi, %r8
-; AVX512-NEXT: shlxq %rcx, %rsi, %rsi
+; AVX512-NEXT: shlxq %rcx, %rax, %rax
; AVX512-NEXT: movl %edx, %edx
; AVX512-NEXT: xorl %r9d, %r9d
; AVX512-NEXT: shldq %cl, %rdx, %r9
; AVX512-NEXT: testb $64, %cl
-; AVX512-NEXT: cmovneq %rsi, %r8
; AVX512-NEXT: cmovneq %rax, %rsi
-; AVX512-NEXT: shlxq %rcx, %rdx, %rcx
-; AVX512-NEXT: cmovneq %rcx, %r9
-; AVX512-NEXT: cmovneq %rax, %rcx
-; AVX512-NEXT: movq (%rdi), %rax
-; AVX512-NEXT: movq 8(%rdi), %rdx
-; AVX512-NEXT: andnq %rdx, %r8, %r10
-; AVX512-NEXT: andq %r8, %rdx
-; AVX512-NEXT: andnq %rax, %rsi, %r8
-; AVX512-NEXT: andq %rsi, %rax
-; AVX512-NEXT: orq %r9, %r10
-; AVX512-NEXT: orq %rcx, %r8
-; AVX512-NEXT: orq %rdx, %rax
-; AVX512-NEXT: sete %al
+; AVX512-NEXT: cmovneq %r8, %rax
+; AVX512-NEXT: shlxq %rcx, %rdx, %rdx
+; AVX512-NEXT: cmovneq %rdx, %r9
+; AVX512-NEXT: cmovneq %r8, %rdx
+; AVX512-NEXT: andnq 8(%rdi), %rsi, %rsi
+; AVX512-NEXT: orq %r9, %rsi
+; AVX512-NEXT: andnq (%rdi), %rax, %r8
+; AVX512-NEXT: orq %rdx, %r8
+; AVX512-NEXT: movl %ecx, %eax
+; AVX512-NEXT: andl $96, %eax
+; AVX512-NEXT: shrl $3, %eax
+; AVX512-NEXT: movl (%rdi,%rax), %eax
+; AVX512-NEXT: btl %ecx, %eax
+; AVX512-NEXT: setae %al
+; AVX512-NEXT: movq %rsi, 8(%rdi)
; AVX512-NEXT: movq %r8, (%rdi)
-; AVX512-NEXT: movq %r10, 8(%rdi)
; AVX512-NEXT: retq
%rem = and i32 %position, 127
%ofs = zext nneg i32 %rem to i128
@@ -1252,344 +815,25 @@ define i1 @init_eq_i128(ptr %word, i32 %position, i1 zeroext %value) nounwind {
define i1 @test_ne_i512(ptr %word, i32 %position) nounwind {
; X86-LABEL: test_ne_i512:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $224, %esp
-; X86-NEXT: movl 12(%ebp), %ecx
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrl $3, %eax
-; X86-NEXT: andl $60, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: leal {{[0-9]+}}(%esp), %edx
-; X86-NEXT: subl %eax, %edx
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 24(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl $31, %ecx
-; X86-NEXT: shldl %cl, %esi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 56(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 60(%edx), %eax
-; X86-NEXT: shldl %cl, %esi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %esi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%edx), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%edx), %edi
-; X86-NEXT: movl %edi, %ebx
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%edx), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %esi, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl 52(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl 4(%edx), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: shldl %cl, %edi, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: shldl %cl, %edi, %eax
-; X86-NEXT: movl 8(%ebp), %ebx
-; X86-NEXT: andl 40(%ebx), %eax
-; X86-NEXT: andl 8(%ebx), %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 56(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 24(%ebx), %edi
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: orl %esi, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %ebx, %edi
-; X86-NEXT: andl 44(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 12(%ebx), %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: movl %esi, %ebx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 60(%edi), %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 28(%edi), %eax
-; X86-NEXT: orl %esi, %eax
-; X86-NEXT: orl %ebx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%edx), %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: negl %edx
-; X86-NEXT: movl 192(%esp,%edx), %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: shldl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: shldl %cl, %ebx, %edx
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl 8(%ebp), %ebx
-; X86-NEXT: andl 32(%ebx), %ecx
-; X86-NEXT: andl (%ebx), %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: andl 16(%ebx), %edi
-; X86-NEXT: andl 48(%ebx), %edx
-; X86-NEXT: orl %edi, %edx
-; X86-NEXT: orl %esi, %edx
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 36(%ebx), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 4(%ebx), %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 20(%ebx), %ecx
-; X86-NEXT: andl 52(%ebx), %eax
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: orl %esi, %eax
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: setne %al
-; X86-NEXT: leal -12(%ebp), %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: shrl $3, %edx
+; X86-NEXT: andl $60, %edx
+; X86-NEXT: movl (%eax,%edx), %eax
+; X86-NEXT: btl %ecx, %eax
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
-; SSE-LABEL: test_ne_i512:
-; SSE: # %bb.0:
-; SSE-NEXT: pushq %r15
-; SSE-NEXT: pushq %r14
-; SSE-NEXT: pushq %rbx
-; SSE-NEXT: xorps %xmm0, %xmm0
-; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $1, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: andl $63, %ecx
-; SSE-NEXT: shrl $3, %esi
-; SSE-NEXT: andl $56, %esi
-; SSE-NEXT: negl %esi
-; SSE-NEXT: movslq %esi, %rbx
-; SSE-NEXT: movq -48(%rsp,%rbx), %rdx
-; SSE-NEXT: movq -40(%rsp,%rbx), %r14
-; SSE-NEXT: movq %r14, %rax
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq -16(%rsp,%rbx), %r11
-; SSE-NEXT: movq -8(%rsp,%rbx), %r10
-; SSE-NEXT: shldq %cl, %r11, %r10
-; SSE-NEXT: movq -32(%rsp,%rbx), %r9
-; SSE-NEXT: movq -24(%rsp,%rbx), %r15
-; SSE-NEXT: movq %r15, %r8
-; SSE-NEXT: shldq %cl, %r9, %r8
-; SSE-NEXT: movq -56(%rsp,%rbx), %rsi
-; SSE-NEXT: shldq %cl, %rsi, %rdx
-; SSE-NEXT: shldq %cl, %r15, %r11
-; SSE-NEXT: shldq %cl, %r14, %r9
-; SSE-NEXT: movq -64(%rsp,%rbx), %rbx
-; SSE-NEXT: shldq %cl, %rbx, %rsi
-; SSE-NEXT: # kill: def $cl killed $cl killed $ecx
-; SSE-NEXT: shlq %cl, %rbx
-; SSE-NEXT: andq 32(%rdi), %r9
-; SSE-NEXT: andq 48(%rdi), %r11
-; SSE-NEXT: andq 16(%rdi), %rdx
-; SSE-NEXT: orq %r11, %rdx
-; SSE-NEXT: andq 40(%rdi), %r8
-; SSE-NEXT: andq 56(%rdi), %r10
-; SSE-NEXT: andq 24(%rdi), %rax
-; SSE-NEXT: orq %r10, %rax
-; SSE-NEXT: andq (%rdi), %rbx
-; SSE-NEXT: orq %r9, %rbx
-; SSE-NEXT: orq %rdx, %rbx
-; SSE-NEXT: andq 8(%rdi), %rsi
-; SSE-NEXT: orq %r8, %rsi
-; SSE-NEXT: orq %rax, %rsi
-; SSE-NEXT: orq %rbx, %rsi
-; SSE-NEXT: setne %al
-; SSE-NEXT: popq %rbx
-; SSE-NEXT: popq %r14
-; SSE-NEXT: popq %r15
-; SSE-NEXT: retq
-;
-; AVX2-LABEL: test_ne_i512:
-; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovss {{.*#+}} xmm0 = [1,0,0,0]
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movl %esi, %ecx
-; AVX2-NEXT: andl $63, %ecx
-; AVX2-NEXT: shrl $3, %esi
-; AVX2-NEXT: andl $56, %esi
-; AVX2-NEXT: negl %esi
-; AVX2-NEXT: movslq %esi, %rsi
-; AVX2-NEXT: movq -48(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq -40(%rsp,%rsi), %rbx
-; AVX2-NEXT: movq %rbx, %rax
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq -16(%rsp,%rsi), %r11
-; AVX2-NEXT: movq -8(%rsp,%rsi), %r10
-; AVX2-NEXT: shldq %cl, %r11, %r10
-; AVX2-NEXT: movq -32(%rsp,%rsi), %r9
-; AVX2-NEXT: movq -24(%rsp,%rsi), %r14
-; AVX2-NEXT: movq %r14, %r8
-; AVX2-NEXT: shldq %cl, %r9, %r8
-; AVX2-NEXT: movq -64(%rsp,%rsi), %r15
-; AVX2-NEXT: movq -56(%rsp,%rsi), %rsi
-; AVX2-NEXT: shldq %cl, %rsi, %rdx
-; AVX2-NEXT: shldq %cl, %r14, %r11
-; AVX2-NEXT: shldq %cl, %rbx, %r9
-; AVX2-NEXT: shldq %cl, %r15, %rsi
-; AVX2-NEXT: shlxq %rcx, %r15, %rcx
-; AVX2-NEXT: andq 32(%rdi), %r9
-; AVX2-NEXT: andq 48(%rdi), %r11
-; AVX2-NEXT: andq 16(%rdi), %rdx
-; AVX2-NEXT: andq 40(%rdi), %r8
-; AVX2-NEXT: andq 56(%rdi), %r10
-; AVX2-NEXT: andq 24(%rdi), %rax
-; AVX2-NEXT: orq %r11, %rdx
-; AVX2-NEXT: orq %r10, %rax
-; AVX2-NEXT: andq (%rdi), %rcx
-; AVX2-NEXT: orq %r9, %rcx
-; AVX2-NEXT: orq %rdx, %rcx
-; AVX2-NEXT: andq 8(%rdi), %rsi
-; AVX2-NEXT: orq %r8, %rsi
-; AVX2-NEXT: orq %rax, %rsi
-; AVX2-NEXT: orq %rcx, %rsi
-; AVX2-NEXT: setne %al
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_ne_i512:
-; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [1,0,0,0]
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: movl %esi, %ecx
-; AVX512-NEXT: andl $63, %ecx
-; AVX512-NEXT: shrl $3, %esi
-; AVX512-NEXT: andl $56, %esi
-; AVX512-NEXT: negl %esi
-; AVX512-NEXT: movslq %esi, %rbx
-; AVX512-NEXT: movq -48(%rsp,%rbx), %rdx
-; AVX512-NEXT: movq -40(%rsp,%rbx), %r14
-; AVX512-NEXT: movq %r14, %rax
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq -16(%rsp,%rbx), %r11
-; AVX512-NEXT: movq -8(%rsp,%rbx), %r10
-; AVX512-NEXT: shldq %cl, %r11, %r10
-; AVX512-NEXT: movq -32(%rsp,%rbx), %r9
-; AVX512-NEXT: movq -24(%rsp,%rbx), %r15
-; AVX512-NEXT: movq %r15, %r8
-; AVX512-NEXT: shldq %cl, %r9, %r8
-; AVX512-NEXT: movq -56(%rsp,%rbx), %rsi
-; AVX512-NEXT: shldq %cl, %rsi, %rdx
-; AVX512-NEXT: shldq %cl, %r15, %r11
-; AVX512-NEXT: shldq %cl, %r14, %r9
-; AVX512-NEXT: movq -64(%rsp,%rbx), %rbx
-; AVX512-NEXT: shldq %cl, %rbx, %rsi
-; AVX512-NEXT: shlxq %rcx, %rbx, %rcx
-; AVX512-NEXT: andq 32(%rdi), %r9
-; AVX512-NEXT: andq 48(%rdi), %r11
-; AVX512-NEXT: andq 16(%rdi), %rdx
-; AVX512-NEXT: andq 40(%rdi), %r8
-; AVX512-NEXT: andq 56(%rdi), %r10
-; AVX512-NEXT: andq 24(%rdi), %rax
-; AVX512-NEXT: orq %r11, %rdx
-; AVX512-NEXT: orq %r10, %rax
-; AVX512-NEXT: andq (%rdi), %rcx
-; AVX512-NEXT: orq %r9, %rcx
-; AVX512-NEXT: orq %rdx, %rcx
-; AVX512-NEXT: andq 8(%rdi), %rsi
-; AVX512-NEXT: orq %r8, %rsi
-; AVX512-NEXT: orq %rax, %rsi
-; AVX512-NEXT: orq %rcx, %rsi
-; AVX512-NEXT: setne %al
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; X64-LABEL: test_ne_i512:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: shrl $3, %eax
+; X64-NEXT: andl $60, %eax
+; X64-NEXT: movl (%rdi,%rax), %eax
+; X64-NEXT: btl %esi, %eax
+; X64-NEXT: setb %al
+; X64-NEXT: retq
%rem = and i32 %position, 511
%ofs = zext nneg i32 %rem to i512
%bit = shl nuw i512 1, %ofs
@@ -1602,572 +846,33 @@ define i1 @test_ne_i512(ptr %word, i32 %position) nounwind {
define i1 @complement_ne_i512(ptr %word, i32 %position) nounwind {
; X86-LABEL: complement_ne_i512:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $272, %esp # imm = 0x110
-; X86-NEXT: movl 12(%ebp), %ecx
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrl $3, %eax
-; X86-NEXT: andl $60, %eax
-; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NEXT: leal {{[0-9]+}}(%esp), %edx
-; X86-NEXT: subl %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 24(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl $31, %ecx
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 56(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 60(%edx), %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%edx), %ebx
-; X86-NEXT: movl %ebx, %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%edx), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl 52(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: shldl %cl, %esi, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 4(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%ebp), %edx
-; X86-NEXT: movl 40(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %eax
-; X86-NEXT: movl 8(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: movl 56(%edx), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edi, %ebx
-; X86-NEXT: movl 24(%edx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: orl %esi, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%eax), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl 12(%eax), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: orl %esi, %ebx
-; X86-NEXT: movl 60(%eax), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl 28(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl (%eax), %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NEXT: negl %eax
-; X86-NEXT: movl 240(%esp,%eax), %esi
-; X86-NEXT: shldl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shll %cl, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%ebp), %esi
-; X86-NEXT: movl 32(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edi, %eax
-; X86-NEXT: movl (%esi), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edx, %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl 16(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %eax
-; X86-NEXT: movl 48(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl (%esp), %edx # 4-byte Folded Reload
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 36(%esi), %ebx
-; X86-NEXT: movl %ebx, %eax
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl 4(%esi), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl %esi, %eax
-; X86-NEXT: movl 20(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl %esi, %edi
-; X86-NEXT: movl 52(%eax), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: orl %edi, %eax
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: xorl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, (%esp) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: movl 8(%ebp), %edx
-; X86-NEXT: movl %ebx, 60(%edx)
-; X86-NEXT: movl %edi, 56(%edx)
-; X86-NEXT: movl %ecx, 52(%edx)
-; X86-NEXT: movl %esi, 44(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 40(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 36(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 32(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 28(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 24(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 20(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 16(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 12(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 8(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 4(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, (%edx)
-; X86-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 48(%edx)
-; X86-NEXT: setne %al
-; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: andl $60, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setb %al
+; X86-NEXT: btcl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
-; SSE-LABEL: complement_ne_i512:
-; SSE: # %bb.0:
-; SSE-NEXT: pushq %rbp
-; SSE-NEXT: pushq %r15
-; SSE-NEXT: pushq %r14
-; SSE-NEXT: pushq %r13
-; SSE-NEXT: pushq %r12
-; SSE-NEXT: pushq %rbx
-; SSE-NEXT: subq $56, %rsp
-; SSE-NEXT: xorps %xmm0, %xmm0
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $1, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: andl $63, %ecx
-; SSE-NEXT: shrl $3, %esi
-; SSE-NEXT: andl $56, %esi
-; SSE-NEXT: negl %esi
-; SSE-NEXT: movslq %esi, %rbx
-; SSE-NEXT: movq (%rsp,%rbx), %rsi
-; SSE-NEXT: movq 8(%rsp,%rbx), %r14
-; SSE-NEXT: movq %r14, %rax
-; SSE-NEXT: shldq %cl, %rsi, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 32(%rsp,%rbx), %r8
-; SSE-NEXT: movq 40(%rsp,%rbx), %rbp
-; SSE-NEXT: shldq %cl, %r8, %rbp
-; SSE-NEXT: movq 16(%rsp,%rbx), %r9
-; SSE-NEXT: movq 24(%rsp,%rbx), %r15
-; SSE-NEXT: movq %r15, %r10
-; SSE-NEXT: shldq %cl, %r9, %r10
-; SSE-NEXT: movq -8(%rsp,%rbx), %r11
-; SSE-NEXT: shldq %cl, %r11, %rsi
-; SSE-NEXT: shldq %cl, %r15, %r8
-; SSE-NEXT: shldq %cl, %r14, %r9
-; SSE-NEXT: movq -16(%rsp,%rbx), %rbx
-; SSE-NEXT: shldq %cl, %rbx, %r11
-; SSE-NEXT: # kill: def $cl killed $cl killed $ecx
-; SSE-NEXT: shlq %cl, %rbx
-; SSE-NEXT: movq 24(%rdi), %r15
-; SSE-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 56(%rdi), %rcx
-; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 16(%rdi), %r12
-; SSE-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 48(%rdi), %r13
-; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %r8, %r13
-; SSE-NEXT: andq %rsi, %r12
-; SSE-NEXT: orq %r13, %r12
-; SSE-NEXT: movq %rcx, %r13
-; SSE-NEXT: andq %rbp, %r13
-; SSE-NEXT: andq %rax, %r15
-; SSE-NEXT: orq %r13, %r15
-; SSE-NEXT: movq 32(%rdi), %r14
-; SSE-NEXT: movq %r14, %rcx
-; SSE-NEXT: andq %r9, %rcx
-; SSE-NEXT: movq (%rdi), %r13
-; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rbx, %r13
-; SSE-NEXT: orq %rcx, %r13
-; SSE-NEXT: orq %r12, %r13
-; SSE-NEXT: movq 40(%rdi), %rcx
-; SSE-NEXT: movq %rcx, %r12
-; SSE-NEXT: andq %r10, %r12
-; SSE-NEXT: movq 8(%rdi), %rdx
-; SSE-NEXT: movq %rdx, %rax
-; SSE-NEXT: andq %r11, %rax
-; SSE-NEXT: orq %r12, %rax
-; SSE-NEXT: orq %r15, %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
-; SSE-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; SSE-NEXT: xorq %rcx, %r10
-; SSE-NEXT: xorq %r14, %r9
-; SSE-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
-; SSE-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; SSE-NEXT: xorq %rdx, %r11
-; SSE-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; SSE-NEXT: orq %r13, %rax
-; SSE-NEXT: movq %r8, 48(%rdi)
-; SSE-NEXT: movq %rbp, 56(%rdi)
-; SSE-NEXT: movq %r9, 32(%rdi)
-; SSE-NEXT: movq %r10, 40(%rdi)
-; SSE-NEXT: movq %rsi, 16(%rdi)
-; SSE-NEXT: movq %r15, 24(%rdi)
-; SSE-NEXT: movq %rbx, (%rdi)
-; SSE-NEXT: movq %r11, 8(%rdi)
-; SSE-NEXT: setne %al
-; SSE-NEXT: addq $56, %rsp
-; SSE-NEXT: popq %rbx
-; SSE-NEXT: popq %r12
-; SSE-NEXT: popq %r13
-; SSE-NEXT: popq %r14
-; SSE-NEXT: popq %r15
-; SSE-NEXT: popq %rbp
-; SSE-NEXT: retq
-;
-; AVX2-LABEL: complement_ne_i512:
-; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: subq $72, %rsp
-; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovss {{.*#+}} xmm0 = [1,0,0,0]
-; AVX2-NEXT: vmovups %ymm0, (%rsp)
-; AVX2-NEXT: movl %esi, %ecx
-; AVX2-NEXT: andl $63, %ecx
-; AVX2-NEXT: shrl $3, %esi
-; AVX2-NEXT: andl $56, %esi
-; AVX2-NEXT: negl %esi
-; AVX2-NEXT: movslq %esi, %rbx
-; AVX2-NEXT: movq 16(%rsp,%rbx), %rsi
-; AVX2-NEXT: movq 24(%rsp,%rbx), %rbp
-; AVX2-NEXT: movq %rbp, %rax
-; AVX2-NEXT: shldq %cl, %rsi, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 48(%rsp,%rbx), %r8
-; AVX2-NEXT: movq 56(%rsp,%rbx), %r13
-; AVX2-NEXT: shldq %cl, %r8, %r13
-; AVX2-NEXT: movq 32(%rsp,%rbx), %r9
-; AVX2-NEXT: movq 40(%rsp,%rbx), %r14
-; AVX2-NEXT: movq %r14, %r10
-; AVX2-NEXT: shldq %cl, %r9, %r10
-; AVX2-NEXT: movq 8(%rsp,%rbx), %r11
-; AVX2-NEXT: shldq %cl, %r11, %rsi
-; AVX2-NEXT: shldq %cl, %r14, %r8
-; AVX2-NEXT: movq 16(%rdi), %r12
-; AVX2-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 48(%rdi), %r14
-; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r8, %r14
-; AVX2-NEXT: andq %rsi, %r12
-; AVX2-NEXT: orq %r14, %r12
-; AVX2-NEXT: movq 56(%rdi), %r15
-; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r13, %r15
-; AVX2-NEXT: movq 24(%rdi), %r14
-; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %rax, %r14
-; AVX2-NEXT: orq %r15, %r14
-; AVX2-NEXT: shldq %cl, %rbp, %r9
-; AVX2-NEXT: movq (%rsp,%rbx), %rdx
-; AVX2-NEXT: movq 32(%rdi), %r15
-; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r9, %r15
-; AVX2-NEXT: shlxq %rcx, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq (%rdi), %rbx
-; AVX2-NEXT: movq %rbx, %rbp
-; AVX2-NEXT: andq %rax, %rbp
-; AVX2-NEXT: orq %r15, %rbp
-; AVX2-NEXT: orq %r12, %rbp
-; AVX2-NEXT: # kill: def $cl killed $cl killed $rcx
-; AVX2-NEXT: shldq %cl, %rdx, %r11
-; AVX2-NEXT: movq 40(%rdi), %rax
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: andq %r10, %rcx
-; AVX2-NEXT: movq 8(%rdi), %r15
-; AVX2-NEXT: movq %r15, %r12
-; AVX2-NEXT: andq %r11, %r12
-; AVX2-NEXT: orq %rcx, %r12
-; AVX2-NEXT: orq %r14, %r12
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; AVX2-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX2-NEXT: xorq %rax, %r10
-; AVX2-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
-; AVX2-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; AVX2-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; AVX2-NEXT: xorq %r15, %r11
-; AVX2-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; AVX2-NEXT: orq %rbp, %r12
-; AVX2-NEXT: movq %r8, 48(%rdi)
-; AVX2-NEXT: movq %r13, 56(%rdi)
-; AVX2-NEXT: movq %r9, 32(%rdi)
-; AVX2-NEXT: movq %r10, 40(%rdi)
-; AVX2-NEXT: movq %rsi, 16(%rdi)
-; AVX2-NEXT: movq %rcx, 24(%rdi)
-; AVX2-NEXT: movq %rbx, (%rdi)
-; AVX2-NEXT: movq %r11, 8(%rdi)
-; AVX2-NEXT: setne %al
-; AVX2-NEXT: addq $72, %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: complement_ne_i512:
-; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %r13
-; AVX512-NEXT: pushq %r12
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: subq $72, %rsp
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [1,0,0,0]
-; AVX512-NEXT: vmovups %ymm0, (%rsp)
-; AVX512-NEXT: movl %esi, %ecx
-; AVX512-NEXT: andl $63, %ecx
-; AVX512-NEXT: shrl $3, %esi
-; AVX512-NEXT: andl $56, %esi
-; AVX512-NEXT: negl %esi
-; AVX512-NEXT: movslq %esi, %rbx
-; AVX512-NEXT: movq 16(%rsp,%rbx), %rsi
-; AVX512-NEXT: movq 24(%rsp,%rbx), %rbp
-; AVX512-NEXT: movq %rbp, %rax
-; AVX512-NEXT: shldq %cl, %rsi, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 48(%rsp,%rbx), %r8
-; AVX512-NEXT: movq 56(%rsp,%rbx), %r13
-; AVX512-NEXT: shldq %cl, %r8, %r13
-; AVX512-NEXT: movq 32(%rsp,%rbx), %r9
-; AVX512-NEXT: movq 40(%rsp,%rbx), %r14
-; AVX512-NEXT: movq %r14, %r10
-; AVX512-NEXT: shldq %cl, %r9, %r10
-; AVX512-NEXT: movq 8(%rsp,%rbx), %r11
-; AVX512-NEXT: shldq %cl, %r11, %rsi
-; AVX512-NEXT: shldq %cl, %r14, %r8
-; AVX512-NEXT: movq 16(%rdi), %r12
-; AVX512-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 48(%rdi), %r14
-; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %r8, %r14
-; AVX512-NEXT: andq %rsi, %r12
-; AVX512-NEXT: orq %r14, %r12
-; AVX512-NEXT: movq 56(%rdi), %r15
-; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %r13, %r15
-; AVX512-NEXT: movq 24(%rdi), %r14
-; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %rax, %r14
-; AVX512-NEXT: orq %r15, %r14
-; AVX512-NEXT: shldq %cl, %rbp, %r9
-; AVX512-NEXT: movq (%rsp,%rbx), %rdx
-; AVX512-NEXT: movq 32(%rdi), %r15
-; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %r9, %r15
-; AVX512-NEXT: shlxq %rcx, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq (%rdi), %rbx
-; AVX512-NEXT: movq %rbx, %rbp
-; AVX512-NEXT: andq %rax, %rbp
-; AVX512-NEXT: orq %r15, %rbp
-; AVX512-NEXT: orq %r12, %rbp
-; AVX512-NEXT: # kill: def $cl killed $cl killed $rcx
-; AVX512-NEXT: shldq %cl, %rdx, %r11
-; AVX512-NEXT: movq 40(%rdi), %rax
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: andq %r10, %rcx
-; AVX512-NEXT: movq 8(%rdi), %r15
-; AVX512-NEXT: movq %r15, %r12
-; AVX512-NEXT: andq %r11, %r12
-; AVX512-NEXT: orq %rcx, %r12
-; AVX512-NEXT: orq %r14, %r12
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX512-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; AVX512-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX512-NEXT: xorq %rax, %r10
-; AVX512-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
-; AVX512-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; AVX512-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; AVX512-NEXT: xorq %r15, %r11
-; AVX512-NEXT: xorq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; AVX512-NEXT: orq %rbp, %r12
-; AVX512-NEXT: movq %r8, 48(%rdi)
-; AVX512-NEXT: movq %r13, 56(%rdi)
-; AVX512-NEXT: movq %r9, 32(%rdi)
-; AVX512-NEXT: movq %r10, 40(%rdi)
-; AVX512-NEXT: movq %rsi, 16(%rdi)
-; AVX512-NEXT: movq %rcx, 24(%rdi)
-; AVX512-NEXT: movq %rbx, (%rdi)
-; AVX512-NEXT: movq %r11, 8(%rdi)
-; AVX512-NEXT: setne %al
-; AVX512-NEXT: addq $72, %rsp
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; X64-LABEL: complement_ne_i512:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shrl $3, %ecx
+; X64-NEXT: andl $60, %ecx
+; X64-NEXT: movl (%rdi,%rcx), %edx
+; X64-NEXT: btl %esi, %edx
+; X64-NEXT: setb %al
+; X64-NEXT: btcl %esi, %edx
+; X64-NEXT: movl %edx, (%rdi,%rcx)
+; X64-NEXT: retq
%rem = and i32 %position, 511
%ofs = zext nneg i32 %rem to i512
%bit = shl nuw i512 1, %ofs
@@ -2182,606 +887,33 @@ define i1 @complement_ne_i512(ptr %word, i32 %position) nounwind {
define i1 @reset_eq_i512(ptr %word, i32 %position) nounwind {
; X86-LABEL: reset_eq_i512:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $288, %esp # imm = 0x120
-; X86-NEXT: movl 12(%ebp), %ecx
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrl $3, %eax
-; X86-NEXT: andl $60, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: leal {{[0-9]+}}(%esp), %edi
-; X86-NEXT: subl %eax, %edi
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 4(%edi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%edi), %eax
-; X86-NEXT: andl $31, %ecx
-; X86-NEXT: movl %eax, %ebx
-; X86-NEXT: shldl %cl, %edx, %ebx
-; X86-NEXT: movl 12(%edi), %edx
-; X86-NEXT: movl %edx, %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%edi), %eax
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%edi), %edx
-; X86-NEXT: movl %edx, %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 24(%edi), %eax
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%edi), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %edx, %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%edi), %eax
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%edi), %esi
-; X86-NEXT: movl %esi, %edx
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%edi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %esi, %edx
-; X86-NEXT: movl 8(%ebp), %esi
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %eax, %edx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %eax, %ebx
-; X86-NEXT: orl %edx, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%edi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: movl 52(%edi), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 56(%edi), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shldl %cl, %esi, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%ebp), %esi
-; X86-NEXT: movl 56(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %eax, %ebx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 24(%esi), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ecx, %eax
-; X86-NEXT: orl %ebx, %eax
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %esi, %ebx
-; X86-NEXT: movl 44(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %eax, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%esi), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ecx, %eax
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 60(%edi), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: shldl %cl, %esi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 60(%ebx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edx, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%ebx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edx, %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%edi), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: negl %eax
-; X86-NEXT: movl 256(%esp,%eax), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: shldl %cl, %edi, %eax
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shll %cl, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebx, %esi
-; X86-NEXT: movl 32(%ebx), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ecx, %edx
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%ebx), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ecx, %edi
-; X86-NEXT: orl %edx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%esi), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ecx, %ebx
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 48(%esi), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ecx, %eax
-; X86-NEXT: orl %ebx, %eax
-; X86-NEXT: orl %edi, %eax
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%esi), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %edx
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 4(%esi), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %edi
-; X86-NEXT: orl %edx, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: shldl %cl, %edi, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%esi), %edi
-; X86-NEXT: andl %edi, %ecx
-; X86-NEXT: movl %ecx, %esi
-; X86-NEXT: movl %edx, %ecx
-; X86-NEXT: movl 8(%ebp), %ebx
-; X86-NEXT: movl 52(%ebx), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %edx
-; X86-NEXT: orl %esi, %edx
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: notl %ebx
-; X86-NEXT: andl %edi, %ebx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: notl %esi
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: notl %edi
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: notl %edi
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: notl %edi
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: notl %ecx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: orl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: movl %edx, 60(%eax)
-; X86-NEXT: movl %esi, 56(%eax)
-; X86-NEXT: movl %ecx, 52(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, 44(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, 40(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, 36(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, 32(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, 28(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, 24(%eax)
-; X86-NEXT: movl %ebx, 20(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, 16(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, 12(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, 8(%eax)
-; X86-NEXT: movl %edi, 4(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, 48(%eax)
-; X86-NEXT: sete %al
-; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: andl $60, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setae %al
+; X86-NEXT: btrl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
-; SSE-LABEL: reset_eq_i512:
-; SSE: # %bb.0:
-; SSE-NEXT: pushq %rbp
-; SSE-NEXT: pushq %r15
-; SSE-NEXT: pushq %r14
-; SSE-NEXT: pushq %r13
-; SSE-NEXT: pushq %r12
-; SSE-NEXT: pushq %rbx
-; SSE-NEXT: subq $56, %rsp
-; SSE-NEXT: xorps %xmm0, %xmm0
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $1, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: andl $63, %ecx
-; SSE-NEXT: shrl $3, %esi
-; SSE-NEXT: andl $56, %esi
-; SSE-NEXT: negl %esi
-; SSE-NEXT: movslq %esi, %rdx
-; SSE-NEXT: movq (%rsp,%rdx), %r9
-; SSE-NEXT: movq 8(%rsp,%rdx), %r8
-; SSE-NEXT: movq %r8, %rsi
-; SSE-NEXT: shldq %cl, %r9, %rsi
-; SSE-NEXT: movq -8(%rsp,%rdx), %rax
-; SSE-NEXT: shldq %cl, %rax, %r9
-; SSE-NEXT: movq 16(%rsp,%rdx), %r14
-; SSE-NEXT: movq 24(%rsp,%rdx), %r10
-; SSE-NEXT: movq %r10, %rbx
-; SSE-NEXT: shldq %cl, %r14, %rbx
-; SSE-NEXT: shldq %cl, %r8, %r14
-; SSE-NEXT: movq 32(%rsp,%rdx), %r13
-; SSE-NEXT: movq 40(%rsp,%rdx), %r12
-; SSE-NEXT: shldq %cl, %r13, %r12
-; SSE-NEXT: shldq %cl, %r10, %r13
-; SSE-NEXT: movq -16(%rsp,%rdx), %rdx
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: # kill: def $cl killed $cl killed $ecx
-; SSE-NEXT: shlq %cl, %rdx
-; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq %r12, %rbp
-; SSE-NEXT: movq %r9, %r15
-; SSE-NEXT: movq %rsi, %r11
-; SSE-NEXT: movq 16(%rdi), %r8
-; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 48(%rdi), %rcx
-; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rcx, %r13
-; SSE-NEXT: andq %r8, %r9
-; SSE-NEXT: orq %r13, %r9
-; SSE-NEXT: movq 56(%rdi), %rcx
-; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rcx, %r12
-; SSE-NEXT: movq 24(%rdi), %r10
-; SSE-NEXT: andq %r10, %rsi
-; SSE-NEXT: orq %r12, %rsi
-; SSE-NEXT: movq %r14, %r13
-; SSE-NEXT: movq 32(%rdi), %rcx
-; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rcx, %r14
-; SSE-NEXT: movq %rdx, %r12
-; SSE-NEXT: movq (%rdi), %rcx
-; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rcx, %rdx
-; SSE-NEXT: orq %r14, %rdx
-; SSE-NEXT: orq %r9, %rdx
-; SSE-NEXT: movq %rbx, %r14
-; SSE-NEXT: movq 40(%rdi), %rcx
-; SSE-NEXT: andq %rcx, %rbx
-; SSE-NEXT: movq %rax, %r9
-; SSE-NEXT: movq 8(%rdi), %r8
-; SSE-NEXT: andq %r8, %rax
-; SSE-NEXT: orq %rbx, %rax
-; SSE-NEXT: orq %rsi, %rax
-; SSE-NEXT: notq %r11
-; SSE-NEXT: andq %r10, %r11
-; SSE-NEXT: notq %r15
-; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
-; SSE-NEXT: notq %r14
-; SSE-NEXT: andq %rcx, %r14
-; SSE-NEXT: notq %r13
-; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; SSE-NEXT: notq %rbp
-; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE-NEXT: notq %rcx
-; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; SSE-NEXT: notq %r9
-; SSE-NEXT: andq %r8, %r9
-; SSE-NEXT: notq %r12
-; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
-; SSE-NEXT: orq %rdx, %rax
-; SSE-NEXT: movq %rcx, 48(%rdi)
-; SSE-NEXT: movq %rbp, 56(%rdi)
-; SSE-NEXT: movq %r13, 32(%rdi)
-; SSE-NEXT: movq %r14, 40(%rdi)
-; SSE-NEXT: movq %r15, 16(%rdi)
-; SSE-NEXT: movq %r11, 24(%rdi)
-; SSE-NEXT: movq %r12, (%rdi)
-; SSE-NEXT: movq %r9, 8(%rdi)
-; SSE-NEXT: sete %al
-; SSE-NEXT: addq $56, %rsp
-; SSE-NEXT: popq %rbx
-; SSE-NEXT: popq %r12
-; SSE-NEXT: popq %r13
-; SSE-NEXT: popq %r14
-; SSE-NEXT: popq %r15
-; SSE-NEXT: popq %rbp
-; SSE-NEXT: retq
-;
-; AVX2-LABEL: reset_eq_i512:
-; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: pushq %rax
-; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovss {{.*#+}} xmm0 = [1,0,0,0]
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movl %esi, %ecx
-; AVX2-NEXT: andl $63, %ecx
-; AVX2-NEXT: shrl $3, %esi
-; AVX2-NEXT: andl $56, %esi
-; AVX2-NEXT: negl %esi
-; AVX2-NEXT: movslq %esi, %rdx
-; AVX2-NEXT: movq -48(%rsp,%rdx), %r8
-; AVX2-NEXT: movq -40(%rsp,%rdx), %rbx
-; AVX2-NEXT: movq %rbx, %rax
-; AVX2-NEXT: shldq %cl, %r8, %rax
-; AVX2-NEXT: movq -16(%rsp,%rdx), %r10
-; AVX2-NEXT: movq -8(%rsp,%rdx), %rsi
-; AVX2-NEXT: shldq %cl, %r10, %rsi
-; AVX2-NEXT: movq -32(%rsp,%rdx), %r11
-; AVX2-NEXT: movq -24(%rsp,%rdx), %r14
-; AVX2-NEXT: movq %r14, %r9
-; AVX2-NEXT: shldq %cl, %r11, %r9
-; AVX2-NEXT: movq -64(%rsp,%rdx), %r15
-; AVX2-NEXT: movq -56(%rsp,%rdx), %rdx
-; AVX2-NEXT: shldq %cl, %rdx, %r8
-; AVX2-NEXT: shldq %cl, %r14, %r10
-; AVX2-NEXT: shldq %cl, %rbx, %r11
-; AVX2-NEXT: shldq %cl, %r15, %rdx
-; AVX2-NEXT: shlxq %rcx, %r15, %rcx
-; AVX2-NEXT: movq 24(%rdi), %rbx
-; AVX2-NEXT: movq 56(%rdi), %r14
-; AVX2-NEXT: movq 16(%rdi), %r15
-; AVX2-NEXT: movq 48(%rdi), %r13
-; AVX2-NEXT: movq 32(%rdi), %rbp
-; AVX2-NEXT: andnq %rbp, %r11, %r12
-; AVX2-NEXT: andq %r11, %rbp
-; AVX2-NEXT: andnq %r13, %r10, %r11
-; AVX2-NEXT: andq %r10, %r13
-; AVX2-NEXT: andnq %r15, %r8, %r10
-; AVX2-NEXT: andq %r8, %r15
-; AVX2-NEXT: movq 40(%rdi), %r8
-; AVX2-NEXT: orq %r13, %r15
-; AVX2-NEXT: andnq %r8, %r9, %r13
-; AVX2-NEXT: andq %r9, %r8
-; AVX2-NEXT: andnq %r14, %rsi, %r9
-; AVX2-NEXT: andq %rsi, %r14
-; AVX2-NEXT: andnq %rbx, %rax, %rsi
-; AVX2-NEXT: andq %rax, %rbx
-; AVX2-NEXT: movq (%rdi), %rax
-; AVX2-NEXT: orq %r14, %rbx
-; AVX2-NEXT: andnq %rax, %rcx, %r14
-; AVX2-NEXT: andq %rcx, %rax
-; AVX2-NEXT: orq %rbp, %rax
-; AVX2-NEXT: movq 8(%rdi), %rcx
-; AVX2-NEXT: orq %r15, %rax
-; AVX2-NEXT: andnq %rcx, %rdx, %r15
-; AVX2-NEXT: andq %rdx, %rcx
-; AVX2-NEXT: orq %r8, %rcx
-; AVX2-NEXT: orq %rbx, %rcx
-; AVX2-NEXT: orq %rax, %rcx
-; AVX2-NEXT: movq %r11, 48(%rdi)
-; AVX2-NEXT: movq %r9, 56(%rdi)
-; AVX2-NEXT: movq %r12, 32(%rdi)
-; AVX2-NEXT: movq %r13, 40(%rdi)
-; AVX2-NEXT: movq %r10, 16(%rdi)
-; AVX2-NEXT: movq %rsi, 24(%rdi)
-; AVX2-NEXT: movq %r14, (%rdi)
-; AVX2-NEXT: movq %r15, 8(%rdi)
-; AVX2-NEXT: sete %al
-; AVX2-NEXT: addq $8, %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: reset_eq_i512:
-; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %r13
-; AVX512-NEXT: pushq %r12
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: pushq %rax
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [1,0,0,0]
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: movl %esi, %ecx
-; AVX512-NEXT: andl $63, %ecx
-; AVX512-NEXT: shrl $3, %esi
-; AVX512-NEXT: andl $56, %esi
-; AVX512-NEXT: negl %esi
-; AVX512-NEXT: movslq %esi, %rbx
-; AVX512-NEXT: movq -48(%rsp,%rbx), %r8
-; AVX512-NEXT: movq -40(%rsp,%rbx), %r14
-; AVX512-NEXT: movq %r14, %rax
-; AVX512-NEXT: shldq %cl, %r8, %rax
-; AVX512-NEXT: movq -16(%rsp,%rbx), %r10
-; AVX512-NEXT: movq -8(%rsp,%rbx), %rsi
-; AVX512-NEXT: shldq %cl, %r10, %rsi
-; AVX512-NEXT: movq -32(%rsp,%rbx), %r11
-; AVX512-NEXT: movq -24(%rsp,%rbx), %r15
-; AVX512-NEXT: movq %r15, %r9
-; AVX512-NEXT: shldq %cl, %r11, %r9
-; AVX512-NEXT: movq -56(%rsp,%rbx), %rdx
-; AVX512-NEXT: shldq %cl, %rdx, %r8
-; AVX512-NEXT: shldq %cl, %r15, %r10
-; AVX512-NEXT: shldq %cl, %r14, %r11
-; AVX512-NEXT: movq -64(%rsp,%rbx), %rbx
-; AVX512-NEXT: shldq %cl, %rbx, %rdx
-; AVX512-NEXT: shlxq %rcx, %rbx, %rcx
-; AVX512-NEXT: movq 24(%rdi), %rbx
-; AVX512-NEXT: movq 56(%rdi), %r14
-; AVX512-NEXT: movq 16(%rdi), %r15
-; AVX512-NEXT: movq 48(%rdi), %r13
-; AVX512-NEXT: movq 32(%rdi), %rbp
-; AVX512-NEXT: andnq %rbp, %r11, %r12
-; AVX512-NEXT: andq %r11, %rbp
-; AVX512-NEXT: andnq %r13, %r10, %r11
-; AVX512-NEXT: andq %r10, %r13
-; AVX512-NEXT: andnq %r15, %r8, %r10
-; AVX512-NEXT: andq %r8, %r15
-; AVX512-NEXT: movq 40(%rdi), %r8
-; AVX512-NEXT: orq %r13, %r15
-; AVX512-NEXT: andnq %r8, %r9, %r13
-; AVX512-NEXT: andq %r9, %r8
-; AVX512-NEXT: andnq %r14, %rsi, %r9
-; AVX512-NEXT: andq %rsi, %r14
-; AVX512-NEXT: andnq %rbx, %rax, %rsi
-; AVX512-NEXT: andq %rax, %rbx
-; AVX512-NEXT: movq (%rdi), %rax
-; AVX512-NEXT: orq %r14, %rbx
-; AVX512-NEXT: andnq %rax, %rcx, %r14
-; AVX512-NEXT: andq %rcx, %rax
-; AVX512-NEXT: orq %rbp, %rax
-; AVX512-NEXT: movq 8(%rdi), %rcx
-; AVX512-NEXT: orq %r15, %rax
-; AVX512-NEXT: andnq %rcx, %rdx, %r15
-; AVX512-NEXT: andq %rdx, %rcx
-; AVX512-NEXT: orq %r8, %rcx
-; AVX512-NEXT: orq %rbx, %rcx
-; AVX512-NEXT: orq %rax, %rcx
-; AVX512-NEXT: movq %r11, 48(%rdi)
-; AVX512-NEXT: movq %r9, 56(%rdi)
-; AVX512-NEXT: movq %r12, 32(%rdi)
-; AVX512-NEXT: movq %r13, 40(%rdi)
-; AVX512-NEXT: movq %r10, 16(%rdi)
-; AVX512-NEXT: movq %rsi, 24(%rdi)
-; AVX512-NEXT: movq %r14, (%rdi)
-; AVX512-NEXT: movq %r15, 8(%rdi)
-; AVX512-NEXT: sete %al
-; AVX512-NEXT: addq $8, %rsp
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; X64-LABEL: reset_eq_i512:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shrl $3, %ecx
+; X64-NEXT: andl $60, %ecx
+; X64-NEXT: movl (%rdi,%rcx), %edx
+; X64-NEXT: btl %esi, %edx
+; X64-NEXT: setae %al
+; X64-NEXT: btrl %esi, %edx
+; X64-NEXT: movl %edx, (%rdi,%rcx)
+; X64-NEXT: retq
%rem = and i32 %position, 511
%ofs = zext nneg i32 %rem to i512
%bit = shl nuw i512 1, %ofs
@@ -2797,572 +929,33 @@ define i1 @reset_eq_i512(ptr %word, i32 %position) nounwind {
define i1 @set_ne_i512(ptr %word, i32 %position) nounwind {
; X86-LABEL: set_ne_i512:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $272, %esp # imm = 0x110
-; X86-NEXT: movl 12(%ebp), %ecx
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: shrl $3, %eax
-; X86-NEXT: andl $60, %eax
-; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NEXT: leal {{[0-9]+}}(%esp), %edx
-; X86-NEXT: subl %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 24(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl $31, %ecx
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 56(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 60(%edx), %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%edx), %ebx
-; X86-NEXT: movl %ebx, %esi
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%edx), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl 52(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: shldl %cl, %esi, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 4(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%ebp), %edx
-; X86-NEXT: movl 40(%edx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %eax
-; X86-NEXT: movl 8(%edx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: movl 56(%edx), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edi, %ebx
-; X86-NEXT: movl 24(%edx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: orl %esi, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%eax), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl 12(%eax), %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: orl %esi, %ebx
-; X86-NEXT: movl 60(%eax), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %eax, %esi
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl 28(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl (%eax), %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NEXT: negl %eax
-; X86-NEXT: movl 240(%esp,%eax), %esi
-; X86-NEXT: shldl %cl, %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %esi
-; X86-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shll %cl, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%ebp), %esi
-; X86-NEXT: movl 32(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edi, %eax
-; X86-NEXT: movl (%esi), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edx, %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl 16(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ebx, %eax
-; X86-NEXT: movl 48(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl (%esp), %edx # 4-byte Folded Reload
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 36(%esi), %ebx
-; X86-NEXT: movl %ebx, %eax
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl 4(%esi), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl %esi, %eax
-; X86-NEXT: movl 20(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl %esi, %edi
-; X86-NEXT: movl 52(%eax), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: orl %edi, %eax
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: orl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl %ecx, (%esp) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: movl 8(%ebp), %edx
-; X86-NEXT: movl %ebx, 60(%edx)
-; X86-NEXT: movl %edi, 56(%edx)
-; X86-NEXT: movl %ecx, 52(%edx)
-; X86-NEXT: movl %esi, 44(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 40(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 36(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 32(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 28(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 24(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 20(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 16(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 12(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 8(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 4(%edx)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, (%edx)
-; X86-NEXT: movl (%esp), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, 48(%edx)
-; X86-NEXT: setne %al
-; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: andl $60, %esi
+; X86-NEXT: movl (%ecx,%esi), %edi
+; X86-NEXT: btl %edx, %edi
+; X86-NEXT: setb %al
+; X86-NEXT: btsl %edx, %edi
+; X86-NEXT: movl %edi, (%ecx,%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
-; SSE-LABEL: set_ne_i512:
-; SSE: # %bb.0:
-; SSE-NEXT: pushq %rbp
-; SSE-NEXT: pushq %r15
-; SSE-NEXT: pushq %r14
-; SSE-NEXT: pushq %r13
-; SSE-NEXT: pushq %r12
-; SSE-NEXT: pushq %rbx
-; SSE-NEXT: subq $56, %rsp
-; SSE-NEXT: xorps %xmm0, %xmm0
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $1, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: andl $63, %ecx
-; SSE-NEXT: shrl $3, %esi
-; SSE-NEXT: andl $56, %esi
-; SSE-NEXT: negl %esi
-; SSE-NEXT: movslq %esi, %rbx
-; SSE-NEXT: movq (%rsp,%rbx), %rsi
-; SSE-NEXT: movq 8(%rsp,%rbx), %r14
-; SSE-NEXT: movq %r14, %rax
-; SSE-NEXT: shldq %cl, %rsi, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 32(%rsp,%rbx), %r8
-; SSE-NEXT: movq 40(%rsp,%rbx), %rbp
-; SSE-NEXT: shldq %cl, %r8, %rbp
-; SSE-NEXT: movq 16(%rsp,%rbx), %r9
-; SSE-NEXT: movq 24(%rsp,%rbx), %r15
-; SSE-NEXT: movq %r15, %r10
-; SSE-NEXT: shldq %cl, %r9, %r10
-; SSE-NEXT: movq -8(%rsp,%rbx), %r11
-; SSE-NEXT: shldq %cl, %r11, %rsi
-; SSE-NEXT: shldq %cl, %r15, %r8
-; SSE-NEXT: shldq %cl, %r14, %r9
-; SSE-NEXT: movq -16(%rsp,%rbx), %rbx
-; SSE-NEXT: shldq %cl, %rbx, %r11
-; SSE-NEXT: # kill: def $cl killed $cl killed $ecx
-; SSE-NEXT: shlq %cl, %rbx
-; SSE-NEXT: movq 24(%rdi), %r15
-; SSE-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 56(%rdi), %rcx
-; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 16(%rdi), %r12
-; SSE-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 48(%rdi), %r13
-; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %r8, %r13
-; SSE-NEXT: andq %rsi, %r12
-; SSE-NEXT: orq %r13, %r12
-; SSE-NEXT: movq %rcx, %r13
-; SSE-NEXT: andq %rbp, %r13
-; SSE-NEXT: andq %rax, %r15
-; SSE-NEXT: orq %r13, %r15
-; SSE-NEXT: movq 32(%rdi), %r14
-; SSE-NEXT: movq %r14, %rcx
-; SSE-NEXT: andq %r9, %rcx
-; SSE-NEXT: movq (%rdi), %r13
-; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rbx, %r13
-; SSE-NEXT: orq %rcx, %r13
-; SSE-NEXT: orq %r12, %r13
-; SSE-NEXT: movq 40(%rdi), %rcx
-; SSE-NEXT: movq %rcx, %r12
-; SSE-NEXT: andq %r10, %r12
-; SSE-NEXT: movq 8(%rdi), %rdx
-; SSE-NEXT: movq %rdx, %rax
-; SSE-NEXT: andq %r11, %rax
-; SSE-NEXT: orq %r12, %rax
-; SSE-NEXT: orq %r15, %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
-; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; SSE-NEXT: orq %rcx, %r10
-; SSE-NEXT: orq %r14, %r9
-; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
-; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; SSE-NEXT: orq %rdx, %r11
-; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; SSE-NEXT: orq %r13, %rax
-; SSE-NEXT: movq %r8, 48(%rdi)
-; SSE-NEXT: movq %rbp, 56(%rdi)
-; SSE-NEXT: movq %r9, 32(%rdi)
-; SSE-NEXT: movq %r10, 40(%rdi)
-; SSE-NEXT: movq %rsi, 16(%rdi)
-; SSE-NEXT: movq %r15, 24(%rdi)
-; SSE-NEXT: movq %rbx, (%rdi)
-; SSE-NEXT: movq %r11, 8(%rdi)
-; SSE-NEXT: setne %al
-; SSE-NEXT: addq $56, %rsp
-; SSE-NEXT: popq %rbx
-; SSE-NEXT: popq %r12
-; SSE-NEXT: popq %r13
-; SSE-NEXT: popq %r14
-; SSE-NEXT: popq %r15
-; SSE-NEXT: popq %rbp
-; SSE-NEXT: retq
-;
-; AVX2-LABEL: set_ne_i512:
-; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: subq $72, %rsp
-; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovss {{.*#+}} xmm0 = [1,0,0,0]
-; AVX2-NEXT: vmovups %ymm0, (%rsp)
-; AVX2-NEXT: movl %esi, %ecx
-; AVX2-NEXT: andl $63, %ecx
-; AVX2-NEXT: shrl $3, %esi
-; AVX2-NEXT: andl $56, %esi
-; AVX2-NEXT: negl %esi
-; AVX2-NEXT: movslq %esi, %rbx
-; AVX2-NEXT: movq 16(%rsp,%rbx), %rsi
-; AVX2-NEXT: movq 24(%rsp,%rbx), %rbp
-; AVX2-NEXT: movq %rbp, %rax
-; AVX2-NEXT: shldq %cl, %rsi, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 48(%rsp,%rbx), %r8
-; AVX2-NEXT: movq 56(%rsp,%rbx), %r13
-; AVX2-NEXT: shldq %cl, %r8, %r13
-; AVX2-NEXT: movq 32(%rsp,%rbx), %r9
-; AVX2-NEXT: movq 40(%rsp,%rbx), %r14
-; AVX2-NEXT: movq %r14, %r10
-; AVX2-NEXT: shldq %cl, %r9, %r10
-; AVX2-NEXT: movq 8(%rsp,%rbx), %r11
-; AVX2-NEXT: shldq %cl, %r11, %rsi
-; AVX2-NEXT: shldq %cl, %r14, %r8
-; AVX2-NEXT: movq 16(%rdi), %r12
-; AVX2-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 48(%rdi), %r14
-; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r8, %r14
-; AVX2-NEXT: andq %rsi, %r12
-; AVX2-NEXT: orq %r14, %r12
-; AVX2-NEXT: movq 56(%rdi), %r15
-; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r13, %r15
-; AVX2-NEXT: movq 24(%rdi), %r14
-; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %rax, %r14
-; AVX2-NEXT: orq %r15, %r14
-; AVX2-NEXT: shldq %cl, %rbp, %r9
-; AVX2-NEXT: movq (%rsp,%rbx), %rdx
-; AVX2-NEXT: movq 32(%rdi), %r15
-; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r9, %r15
-; AVX2-NEXT: shlxq %rcx, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq (%rdi), %rbx
-; AVX2-NEXT: movq %rbx, %rbp
-; AVX2-NEXT: andq %rax, %rbp
-; AVX2-NEXT: orq %r15, %rbp
-; AVX2-NEXT: orq %r12, %rbp
-; AVX2-NEXT: # kill: def $cl killed $cl killed $rcx
-; AVX2-NEXT: shldq %cl, %rdx, %r11
-; AVX2-NEXT: movq 40(%rdi), %rax
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: andq %r10, %rcx
-; AVX2-NEXT: movq 8(%rdi), %r15
-; AVX2-NEXT: movq %r15, %r12
-; AVX2-NEXT: andq %r11, %r12
-; AVX2-NEXT: orq %rcx, %r12
-; AVX2-NEXT: orq %r14, %r12
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX2-NEXT: orq %rax, %r10
-; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
-; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; AVX2-NEXT: orq %r15, %r11
-; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; AVX2-NEXT: orq %rbp, %r12
-; AVX2-NEXT: movq %r8, 48(%rdi)
-; AVX2-NEXT: movq %r13, 56(%rdi)
-; AVX2-NEXT: movq %r9, 32(%rdi)
-; AVX2-NEXT: movq %r10, 40(%rdi)
-; AVX2-NEXT: movq %rsi, 16(%rdi)
-; AVX2-NEXT: movq %rcx, 24(%rdi)
-; AVX2-NEXT: movq %rbx, (%rdi)
-; AVX2-NEXT: movq %r11, 8(%rdi)
-; AVX2-NEXT: setne %al
-; AVX2-NEXT: addq $72, %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: set_ne_i512:
-; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %r13
-; AVX512-NEXT: pushq %r12
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: subq $72, %rsp
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [1,0,0,0]
-; AVX512-NEXT: vmovups %ymm0, (%rsp)
-; AVX512-NEXT: movl %esi, %ecx
-; AVX512-NEXT: andl $63, %ecx
-; AVX512-NEXT: shrl $3, %esi
-; AVX512-NEXT: andl $56, %esi
-; AVX512-NEXT: negl %esi
-; AVX512-NEXT: movslq %esi, %rbx
-; AVX512-NEXT: movq 16(%rsp,%rbx), %rsi
-; AVX512-NEXT: movq 24(%rsp,%rbx), %rbp
-; AVX512-NEXT: movq %rbp, %rax
-; AVX512-NEXT: shldq %cl, %rsi, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 48(%rsp,%rbx), %r8
-; AVX512-NEXT: movq 56(%rsp,%rbx), %r13
-; AVX512-NEXT: shldq %cl, %r8, %r13
-; AVX512-NEXT: movq 32(%rsp,%rbx), %r9
-; AVX512-NEXT: movq 40(%rsp,%rbx), %r14
-; AVX512-NEXT: movq %r14, %r10
-; AVX512-NEXT: shldq %cl, %r9, %r10
-; AVX512-NEXT: movq 8(%rsp,%rbx), %r11
-; AVX512-NEXT: shldq %cl, %r11, %rsi
-; AVX512-NEXT: shldq %cl, %r14, %r8
-; AVX512-NEXT: movq 16(%rdi), %r12
-; AVX512-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 48(%rdi), %r14
-; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %r8, %r14
-; AVX512-NEXT: andq %rsi, %r12
-; AVX512-NEXT: orq %r14, %r12
-; AVX512-NEXT: movq 56(%rdi), %r15
-; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %r13, %r15
-; AVX512-NEXT: movq 24(%rdi), %r14
-; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %rax, %r14
-; AVX512-NEXT: orq %r15, %r14
-; AVX512-NEXT: shldq %cl, %rbp, %r9
-; AVX512-NEXT: movq (%rsp,%rbx), %rdx
-; AVX512-NEXT: movq 32(%rdi), %r15
-; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %r9, %r15
-; AVX512-NEXT: shlxq %rcx, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq (%rdi), %rbx
-; AVX512-NEXT: movq %rbx, %rbp
-; AVX512-NEXT: andq %rax, %rbp
-; AVX512-NEXT: orq %r15, %rbp
-; AVX512-NEXT: orq %r12, %rbp
-; AVX512-NEXT: # kill: def $cl killed $cl killed $rcx
-; AVX512-NEXT: shldq %cl, %rdx, %r11
-; AVX512-NEXT: movq 40(%rdi), %rax
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: andq %r10, %rcx
-; AVX512-NEXT: movq 8(%rdi), %r15
-; AVX512-NEXT: movq %r15, %r12
-; AVX512-NEXT: andq %r11, %r12
-; AVX512-NEXT: orq %rcx, %r12
-; AVX512-NEXT: orq %r14, %r12
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX512-NEXT: orq %rax, %r10
-; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
-; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; AVX512-NEXT: orq %r15, %r11
-; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; AVX512-NEXT: orq %rbp, %r12
-; AVX512-NEXT: movq %r8, 48(%rdi)
-; AVX512-NEXT: movq %r13, 56(%rdi)
-; AVX512-NEXT: movq %r9, 32(%rdi)
-; AVX512-NEXT: movq %r10, 40(%rdi)
-; AVX512-NEXT: movq %rsi, 16(%rdi)
-; AVX512-NEXT: movq %rcx, 24(%rdi)
-; AVX512-NEXT: movq %rbx, (%rdi)
-; AVX512-NEXT: movq %r11, 8(%rdi)
-; AVX512-NEXT: setne %al
-; AVX512-NEXT: addq $72, %rsp
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; X64-LABEL: set_ne_i512:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shrl $3, %ecx
+; X64-NEXT: andl $60, %ecx
+; X64-NEXT: movl (%rdi,%rcx), %edx
+; X64-NEXT: btl %esi, %edx
+; X64-NEXT: setb %al
+; X64-NEXT: btsl %esi, %edx
+; X64-NEXT: movl %edx, (%rdi,%rcx)
+; X64-NEXT: retq
%rem = and i32 %position, 511
%ofs = zext nneg i32 %rem to i512
%bit = shl nuw i512 1, %ofs
@@ -3383,13 +976,14 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $432, %esp # imm = 0x1B0
+; X86-NEXT: subl $352, %esp # imm = 0x160
; X86-NEXT: movl 12(%ebp), %ecx
; X86-NEXT: movl %ecx, %edx
; X86-NEXT: shrl $3, %edx
; X86-NEXT: andl $60, %edx
-; X86-NEXT: leal {{[0-9]+}}(%esp), %esi
-; X86-NEXT: subl %edx, %esi
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl %edx, %eax
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
@@ -3422,60 +1016,58 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 56(%esi), %eax
+; X86-NEXT: movl 56(%eax), %esi
+; X86-NEXT: movl 60(%eax), %ebx
+; X86-NEXT: movl 52(%eax), %edi
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 48(%eax), %edi
+; X86-NEXT: movl 44(%eax), %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 40(%eax), %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 36(%eax), %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 32(%eax), %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 28(%eax), %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 24(%eax), %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 20(%eax), %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 16(%eax), %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 12(%eax), %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 8(%eax), %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 4(%eax), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 60(%esi), %eax
+; X86-NEXT: movzbl 16(%ebp), %eax
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT: andl $31, %ecx
+; X86-NEXT: shldl %cl, %esi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: shldl %cl, %eax, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shldl %cl, %edi, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 52(%esi), %eax
-; X86-NEXT: movl 48(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 24(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%esi), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%esi), %edi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: shldl %cl, %ebx, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 4(%esi), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movzbl 16(%ebp), %ebx
-; X86-NEXT: movzbl %bl, %esi
-; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT: leal {{[0-9]+}}(%esp), %esi
-; X86-NEXT: subl %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl $31, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT: shldl %cl, %edx, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: shldl %cl, %eax, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: shldl %cl, %ebx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: shldl %cl, %esi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shldl %cl, %edx, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: shldl %cl, %eax, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -3500,9 +1092,12 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: shldl %cl, %eax, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: movl %ebx, %edx
-; X86-NEXT: shldl %cl, %edi, %edx
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
@@ -3534,273 +1129,148 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 8(%ebp), %ebx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 48(%ebx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %edx
+; X86-NEXT: movl 56(%eax), %esi
+; X86-NEXT: movl 60(%eax), %edi
+; X86-NEXT: shldl %cl, %esi, %edi
+; X86-NEXT: movl 8(%ebp), %edx
+; X86-NEXT: andl 60(%edx), %ebx
+; X86-NEXT: orl %edi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 52(%eax), %edi
+; X86-NEXT: shldl %cl, %edi, %esi
+; X86-NEXT: andl 56(%edx), %ebx
+; X86-NEXT: orl %esi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 48(%eax), %esi
+; X86-NEXT: shldl %cl, %esi, %edi
+; X86-NEXT: andl 52(%edx), %ebx
+; X86-NEXT: orl %edi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 44(%eax), %edi
+; X86-NEXT: shldl %cl, %edi, %esi
+; X86-NEXT: andl 48(%edx), %ebx
+; X86-NEXT: orl %esi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 40(%eax), %esi
+; X86-NEXT: shldl %cl, %esi, %edi
+; X86-NEXT: andl 44(%edx), %ebx
+; X86-NEXT: orl %edi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 36(%eax), %edi
+; X86-NEXT: shldl %cl, %edi, %esi
+; X86-NEXT: andl 40(%edx), %ebx
+; X86-NEXT: orl %esi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 32(%eax), %esi
+; X86-NEXT: shldl %cl, %esi, %edi
+; X86-NEXT: andl 36(%edx), %ebx
+; X86-NEXT: orl %edi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 28(%eax), %edi
+; X86-NEXT: shldl %cl, %edi, %esi
+; X86-NEXT: andl 32(%edx), %ebx
+; X86-NEXT: orl %esi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 24(%eax), %esi
+; X86-NEXT: shldl %cl, %esi, %edi
+; X86-NEXT: andl 28(%edx), %ebx
+; X86-NEXT: orl %edi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 20(%eax), %edi
+; X86-NEXT: shldl %cl, %edi, %esi
+; X86-NEXT: andl 24(%edx), %ebx
+; X86-NEXT: orl %esi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 16(%eax), %esi
+; X86-NEXT: shldl %cl, %esi, %edi
+; X86-NEXT: andl 20(%edx), %ebx
+; X86-NEXT: orl %edi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 12(%eax), %edi
+; X86-NEXT: shldl %cl, %edi, %esi
+; X86-NEXT: andl 16(%edx), %ebx
+; X86-NEXT: orl %esi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 8(%eax), %esi
+; X86-NEXT: shldl %cl, %esi, %edi
+; X86-NEXT: andl 12(%edx), %ebx
+; X86-NEXT: orl %edi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: notl %ebx
+; X86-NEXT: movl 4(%eax), %edi
+; X86-NEXT: shldl %cl, %edi, %esi
+; X86-NEXT: andl 8(%edx), %ebx
+; X86-NEXT: orl %esi, %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%ebx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %eax, %esi
-; X86-NEXT: orl %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%ebx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %eax, %edx
-; X86-NEXT: movl %edx, %esi
+; X86-NEXT: notl %esi
+; X86-NEXT: movl (%eax), %eax
+; X86-NEXT: shldl %cl, %eax, %edi
+; X86-NEXT: andl 4(%edx), %esi
+; X86-NEXT: orl %edi, %esi
+; X86-NEXT: movl %esi, %edi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: notl %esi
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: andl (%edx), %esi
+; X86-NEXT: orl %eax, %esi
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%ebx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edx, %eax
-; X86-NEXT: orl %esi, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 56(%ebx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %edx
-; X86-NEXT: movl %edx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 24(%ebx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %edx
-; X86-NEXT: orl %edi, %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl (%edx,%eax), %eax
+; X86-NEXT: btl %ecx, %eax
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 52(%ebx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edx, %eax
-; X86-NEXT: movl %eax, %edx
+; X86-NEXT: movl %eax, 60(%edx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%ebx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %eax
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %eax, 56(%edx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%ebx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edx, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%ebx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %edi
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 60(%ebx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edx, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%ebx), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %eax, %edx
-; X86-NEXT: orl %esi, %edx
-; X86-NEXT: orl %edi, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: movl %eax, 52(%edx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shll %cl, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%ebx), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %esi, %ecx
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl (%ebx), %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edi, %eax
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %eax, 48(%edx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%ebx), %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %ecx, %eax
-; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 4(%ebx), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl %edx, %eax
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl 56(%edi), %ebx
-; X86-NEXT: movl 60(%edi), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: shldl %cl, %ebx, %eax
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 52(%edi), %eax
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 48(%edi), %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %esi, %eax
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %eax, 44(%edx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: notl %eax
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: movl 40(%edi), %ebx
-; X86-NEXT: movl 44(%edi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %ebx, %eax
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 36(%edi), %eax
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 32(%edi), %ebx
-; X86-NEXT: shldl %cl, %ebx, %eax
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 28(%edi), %eax
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 24(%edi), %ebx
-; X86-NEXT: shldl %cl, %ebx, %eax
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 20(%edi), %eax
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 16(%edi), %ebx
-; X86-NEXT: shldl %cl, %ebx, %eax
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl 12(%edi), %eax
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: notl %esi
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: movl 8(%edi), %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %eax, 40(%edx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: notl %eax
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl 4(%edi), %ebx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: shldl %cl, %ebx, %edx
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: movl %eax, 36(%edx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: shldl %cl, %esi, %eax
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: movl %edx, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: notl %edx
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-NEXT: movl (%edi), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %ebx
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: notl %edi
-; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shll %cl, %eax
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: movl %edi, %ecx
+; X86-NEXT: movl %eax, 32(%edx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 60(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 56(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 52(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 44(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 40(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 36(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 32(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 28(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 24(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 20(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 16(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 12(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl %edi, 8(%eax)
-; X86-NEXT: movl %edx, 4(%eax)
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: movl %esi, 48(%eax)
-; X86-NEXT: sete %al
+; X86-NEXT: movl %eax, 28(%edx)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movl %eax, 24(%edx)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movl %eax, 20(%edx)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movl %eax, 16(%edx)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movl %eax, 12(%edx)
+; X86-NEXT: movl %ebx, 8(%edx)
+; X86-NEXT: movl %edi, 4(%edx)
+; X86-NEXT: movl %esi, (%edx)
+; X86-NEXT: setae %al
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
@@ -3816,7 +1286,8 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; SSE-NEXT: pushq %r13
; SSE-NEXT: pushq %r12
; SSE-NEXT: pushq %rbx
-; SSE-NEXT: subq $216, %rsp
+; SSE-NEXT: subq $184, %rsp
+; SSE-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
@@ -3829,139 +1300,103 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; SSE-NEXT: movq $1, {{[0-9]+}}(%rsp)
; SSE-NEXT: movl %esi, %ecx
; SSE-NEXT: andl $63, %ecx
-; SSE-NEXT: shrl $3, %esi
-; SSE-NEXT: andl $56, %esi
-; SSE-NEXT: negl %esi
-; SSE-NEXT: movslq %esi, %r10
-; SSE-NEXT: movq 184(%rsp,%r10), %r11
-; SSE-NEXT: movq 192(%rsp,%r10), %rsi
-; SSE-NEXT: movq %rsi, %r13
-; SSE-NEXT: shldq %cl, %r11, %r13
-; SSE-NEXT: movq 200(%rsp,%r10), %r15
-; SSE-NEXT: shldq %cl, %rsi, %r15
-; SSE-NEXT: movq 168(%rsp,%r10), %rbx
-; SSE-NEXT: movq 176(%rsp,%r10), %rsi
-; SSE-NEXT: movq %rsi, %r14
-; SSE-NEXT: shldq %cl, %rbx, %r14
-; SSE-NEXT: shldq %cl, %rsi, %r11
-; SSE-NEXT: movq 152(%rsp,%r10), %rax
-; SSE-NEXT: movq 160(%rsp,%r10), %r8
-; SSE-NEXT: movq %r8, %r12
-; SSE-NEXT: shldq %cl, %rax, %r12
-; SSE-NEXT: shldq %cl, %r8, %rbx
-; SSE-NEXT: movq 144(%rsp,%r10), %r9
-; SSE-NEXT: movq %r9, %r8
-; SSE-NEXT: shlq %cl, %r8
-; SSE-NEXT: shldq %cl, %r9, %rax
-; SSE-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movl %edx, %edx
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
+; SSE-NEXT: movl %esi, %eax
+; SSE-NEXT: shrl $3, %eax
+; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: andl $56, %eax
+; SSE-NEXT: negl %eax
+; SSE-NEXT: movslq %eax, %r12
+; SSE-NEXT: movq 160(%rsp,%r12), %rax
+; SSE-NEXT: movq 168(%rsp,%r12), %r10
+; SSE-NEXT: shldq %cl, %rax, %r10
+; SSE-NEXT: movq 152(%rsp,%r12), %rsi
+; SSE-NEXT: shldq %cl, %rsi, %rax
+; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: movq 144(%rsp,%r12), %r11
+; SSE-NEXT: shldq %cl, %r11, %rsi
+; SSE-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: movq 136(%rsp,%r12), %rbx
+; SSE-NEXT: shldq %cl, %rbx, %r11
+; SSE-NEXT: movq 128(%rsp,%r12), %r14
+; SSE-NEXT: shldq %cl, %r14, %rbx
+; SSE-NEXT: movq 120(%rsp,%r12), %r15
+; SSE-NEXT: shldq %cl, %r15, %r14
+; SSE-NEXT: movq 112(%rsp,%r12), %r13
+; SSE-NEXT: shldq %cl, %r13, %r15
+; SSE-NEXT: shlq %cl, %r13
+; SSE-NEXT: movl %edx, %eax
; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, (%rsp)
+; SSE-NEXT: movups %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE-NEXT: movq %rdx, {{[0-9]+}}(%rsp)
+; SSE-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movq $0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movq 16(%rdi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 48(%rdi), %rsi
-; SSE-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rsi, %r13
-; SSE-NEXT: andq %rdx, %r12
-; SSE-NEXT: orq %r13, %r12
-; SSE-NEXT: movq %r15, %rsi
-; SSE-NEXT: movq 56(%rdi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rdx, %r15
-; SSE-NEXT: movq %rbx, %r13
-; SSE-NEXT: movq 24(%rdi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rdx, %rbx
-; SSE-NEXT: orq %r15, %rbx
-; SSE-NEXT: movq %r14, %rbp
-; SSE-NEXT: movq 32(%rdi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rdx, %r14
-; SSE-NEXT: movq %r8, %r15
-; SSE-NEXT: movq (%rdi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rdx, %r8
-; SSE-NEXT: orq %r14, %r8
-; SSE-NEXT: orq %r12, %r8
-; SSE-NEXT: movq %r11, %r12
-; SSE-NEXT: movq 40(%rdi), %r9
-; SSE-NEXT: andq %r9, %r11
-; SSE-NEXT: movq %rax, %r14
-; SSE-NEXT: movq 8(%rdi), %rdx
+; SSE-NEXT: movq 32(%rsp,%r12), %rax
+; SSE-NEXT: movq 40(%rsp,%r12), %rdx
+; SSE-NEXT: shldq %cl, %rax, %rdx
; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq %rdx, %rax
-; SSE-NEXT: orq %r11, %rax
-; SSE-NEXT: orq %rbx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: movq 24(%rsp,%r12), %rdx
+; SSE-NEXT: shldq %cl, %rdx, %rax
+; SSE-NEXT: movq 16(%rsp,%r12), %rsi
+; SSE-NEXT: shldq %cl, %rsi, %rdx
+; SSE-NEXT: movq 8(%rsp,%r12), %r8
+; SSE-NEXT: shldq %cl, %r8, %rsi
+; SSE-NEXT: movq (%rsp,%r12), %rbp
+; SSE-NEXT: shldq %cl, %rbp, %r8
+; SSE-NEXT: movq -8(%rsp,%r12), %r9
+; SSE-NEXT: shldq %cl, %r9, %rbp
+; SSE-NEXT: notq %r10
+; SSE-NEXT: andq 56(%rdi), %r10
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; SSE-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; SSE-NEXT: notq %r10
+; SSE-NEXT: andq 48(%rdi), %r10
+; SSE-NEXT: orq %rax, %r10
; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; SSE-NEXT: notq %rax
-; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; SSE-NEXT: andq 40(%rdi), %rax
+; SSE-NEXT: orq %rdx, %rax
; SSE-NEXT: movq %rax, %rdx
-; SSE-NEXT: movq 56(%rsp,%r10), %r11
-; SSE-NEXT: movq 64(%rsp,%r10), %rax
-; SSE-NEXT: movq %rax, %rbx
-; SSE-NEXT: shldq %cl, %r11, %rbx
-; SSE-NEXT: orq %rbx, %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: notq %rsi
-; SSE-NEXT: movq 72(%rsp,%r10), %rbx
-; SSE-NEXT: shldq %cl, %rax, %rbx
-; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; SSE-NEXT: orq %rbx, %rsi
-; SSE-NEXT: notq %rbp
-; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
-; SSE-NEXT: movq 40(%rsp,%r10), %rax
-; SSE-NEXT: movq 48(%rsp,%r10), %rdx
-; SSE-NEXT: movq %rdx, %rbx
-; SSE-NEXT: shldq %cl, %rax, %rbx
-; SSE-NEXT: orq %rbx, %rbp
-; SSE-NEXT: notq %r12
-; SSE-NEXT: andq %r9, %r12
-; SSE-NEXT: shldq %cl, %rdx, %r11
-; SSE-NEXT: movq 24(%rsp,%r10), %r9
-; SSE-NEXT: movq 32(%rsp,%r10), %rdx
-; SSE-NEXT: movq %rdx, %rbx
-; SSE-NEXT: shldq %cl, %r9, %rbx
-; SSE-NEXT: orq %r11, %r12
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
; SSE-NEXT: notq %r11
-; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: orq %rbx, %r11
-; SSE-NEXT: notq %r13
-; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; SSE-NEXT: orq %rax, %r13
-; SSE-NEXT: notq %r15
-; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
-; SSE-NEXT: movq 16(%rsp,%r10), %rax
-; SSE-NEXT: movq %rax, %rdx
-; SSE-NEXT: shlq %cl, %rdx
-; SSE-NEXT: orq %rdx, %r15
+; SSE-NEXT: andq 32(%rdi), %r11
+; SSE-NEXT: orq %rsi, %r11
+; SSE-NEXT: notq %rbx
+; SSE-NEXT: andq 24(%rdi), %rbx
+; SSE-NEXT: orq %r8, %rbx
; SSE-NEXT: notq %r14
-; SSE-NEXT: andq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
-; SSE-NEXT: # kill: def $cl killed $cl killed $ecx
+; SSE-NEXT: andq 16(%rdi), %r14
+; SSE-NEXT: orq %rbp, %r14
+; SSE-NEXT: notq %r15
+; SSE-NEXT: movq -16(%rsp,%r12), %rax
; SSE-NEXT: shldq %cl, %rax, %r9
-; SSE-NEXT: orq %r9, %r14
-; SSE-NEXT: orq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; SSE-NEXT: andq 8(%rdi), %r15
+; SSE-NEXT: orq %r9, %r15
+; SSE-NEXT: notq %r13
+; SSE-NEXT: # kill: def $cl killed $cl killed $ecx
+; SSE-NEXT: shlq %cl, %rax
+; SSE-NEXT: andq (%rdi), %r13
+; SSE-NEXT: orq %rax, %r13
+; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; SSE-NEXT: andl $60, %eax
+; SSE-NEXT: movl (%rdi,%rax), %eax
+; SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload
+; SSE-NEXT: btl %ecx, %eax
; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: movq %rax, 48(%rdi)
-; SSE-NEXT: movq %rsi, 56(%rdi)
-; SSE-NEXT: movq %rbp, 32(%rdi)
-; SSE-NEXT: movq %r12, 40(%rdi)
-; SSE-NEXT: movq %r11, 16(%rdi)
-; SSE-NEXT: movq %r13, 24(%rdi)
-; SSE-NEXT: movq %r15, (%rdi)
-; SSE-NEXT: movq %r14, 8(%rdi)
-; SSE-NEXT: sete %al
-; SSE-NEXT: addq $216, %rsp
+; SSE-NEXT: movq %rax, 56(%rdi)
+; SSE-NEXT: movq %r10, 48(%rdi)
+; SSE-NEXT: movq %rdx, 40(%rdi)
+; SSE-NEXT: movq %r11, 32(%rdi)
+; SSE-NEXT: movq %rbx, 24(%rdi)
+; SSE-NEXT: movq %r14, 16(%rdi)
+; SSE-NEXT: movq %r15, 8(%rdi)
+; SSE-NEXT: movq %r13, (%rdi)
+; SSE-NEXT: setae %al
+; SSE-NEXT: addq $184, %rsp
; SSE-NEXT: popq %rbx
; SSE-NEXT: popq %r12
; SSE-NEXT: popq %r13
@@ -3978,132 +1413,103 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; AVX2-NEXT: pushq %r13
; AVX2-NEXT: pushq %r12
; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: subq $200, %rsp
+; AVX2-NEXT: subq $168, %rsp
; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovss {{.*#+}} xmm1 = [1,0,0,0]
; AVX2-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: movl %esi, %r8d
-; AVX2-NEXT: andl $63, %r8d
-; AVX2-NEXT: shrl $3, %esi
-; AVX2-NEXT: andl $56, %esi
-; AVX2-NEXT: negl %esi
-; AVX2-NEXT: movslq %esi, %rsi
-; AVX2-NEXT: movq 144(%rsp,%rsi), %r11
-; AVX2-NEXT: movq 152(%rsp,%rsi), %r12
-; AVX2-NEXT: movq %r12, %r10
-; AVX2-NEXT: movl %r8d, %ecx
-; AVX2-NEXT: shldq %cl, %r11, %r10
-; AVX2-NEXT: movq 176(%rsp,%rsi), %r14
-; AVX2-NEXT: movq 184(%rsp,%rsi), %r9
-; AVX2-NEXT: shldq %cl, %r14, %r9
-; AVX2-NEXT: movq 160(%rsp,%rsi), %r15
-; AVX2-NEXT: movq 168(%rsp,%rsi), %r13
-; AVX2-NEXT: movq %r13, %rbx
-; AVX2-NEXT: shldq %cl, %r15, %rbx
-; AVX2-NEXT: movq 128(%rsp,%rsi), %rbp
-; AVX2-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 136(%rsp,%rsi), %rax
-; AVX2-NEXT: shldq %cl, %rax, %r11
-; AVX2-NEXT: shldq %cl, %r13, %r14
-; AVX2-NEXT: shldq %cl, %r12, %r15
-; AVX2-NEXT: shldq %cl, %rbp, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movl %edx, %edx
+; AVX2-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX2-NEXT: movl %esi, %ecx
+; AVX2-NEXT: andl $63, %ecx
+; AVX2-NEXT: movl %esi, %r11d
+; AVX2-NEXT: shrl $3, %r11d
+; AVX2-NEXT: movl %r11d, %eax
+; AVX2-NEXT: andl $56, %eax
+; AVX2-NEXT: negl %eax
+; AVX2-NEXT: movslq %eax, %r10
+; AVX2-NEXT: movq 104(%rsp,%r10), %r15
+; AVX2-NEXT: movq 112(%rsp,%r10), %rax
+; AVX2-NEXT: movq %rax, %rsi
+; AVX2-NEXT: shldq %cl, %r15, %rsi
+; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq 120(%rsp,%r10), %rsi
+; AVX2-NEXT: movq %rsi, %r8
+; AVX2-NEXT: shldq %cl, %rax, %r8
+; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq 128(%rsp,%r10), %rax
+; AVX2-NEXT: movq %rax, %rbx
+; AVX2-NEXT: shldq %cl, %rsi, %rbx
+; AVX2-NEXT: movq 136(%rsp,%r10), %rsi
+; AVX2-NEXT: movq %rsi, %r14
+; AVX2-NEXT: shldq %cl, %rax, %r14
+; AVX2-NEXT: movq 144(%rsp,%r10), %rax
+; AVX2-NEXT: movq %rax, %r12
+; AVX2-NEXT: shldq %cl, %rsi, %r12
+; AVX2-NEXT: movq 96(%rsp,%r10), %rsi
+; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq 152(%rsp,%r10), %r13
+; AVX2-NEXT: shldq %cl, %rax, %r13
+; AVX2-NEXT: shldq %cl, %rsi, %r15
+; AVX2-NEXT: movl %edx, %eax
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vmovups %xmm1, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: movq %rdx, (%rsp)
+; AVX2-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: movq $0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: movq 16(%rdi), %r12
-; AVX2-NEXT: movq 48(%rdi), %rbp
-; AVX2-NEXT: movq 32(%rdi), %r13
-; AVX2-NEXT: andnq %r13, %r15, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r15, %r13
-; AVX2-NEXT: andnq %rbp, %r14, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r14, %rbp
-; AVX2-NEXT: andnq %r12, %r11, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r11, %r12
-; AVX2-NEXT: movq 40(%rdi), %rax
+; AVX2-NEXT: movq 16(%rsp,%r10), %rbp
+; AVX2-NEXT: movq 24(%rsp,%r10), %r9
+; AVX2-NEXT: shldq %cl, %rbp, %r9
+; AVX2-NEXT: movq 8(%rsp,%r10), %rdx
+; AVX2-NEXT: shldq %cl, %rdx, %rbp
+; AVX2-NEXT: movq (%rsp,%r10), %rax
+; AVX2-NEXT: shldq %cl, %rax, %rdx
+; AVX2-NEXT: movq -8(%rsp,%r10), %r8
+; AVX2-NEXT: shldq %cl, %r8, %rax
+; AVX2-NEXT: movq -16(%rsp,%r10), %rsi
+; AVX2-NEXT: shldq %cl, %rsi, %r8
+; AVX2-NEXT: andnq 56(%rdi), %r13, %r13
+; AVX2-NEXT: orq %r9, %r13
+; AVX2-NEXT: movq -24(%rsp,%r10), %r9
+; AVX2-NEXT: shldq %cl, %r9, %rsi
+; AVX2-NEXT: andnq 48(%rdi), %r12, %r12
+; AVX2-NEXT: andnq 40(%rdi), %r14, %r14
; AVX2-NEXT: orq %rbp, %r12
-; AVX2-NEXT: andnq %rax, %rbx, %rcx
-; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq %rax, %rbp
-; AVX2-NEXT: andq %rbx, %rbp
-; AVX2-NEXT: movq 56(%rdi), %rcx
-; AVX2-NEXT: andnq %rcx, %r9, %rbx
-; AVX2-NEXT: andq %r9, %rcx
-; AVX2-NEXT: movq 24(%rdi), %rax
-; AVX2-NEXT: andnq %rax, %r10, %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq %r10, %rax
-; AVX2-NEXT: orq %rcx, %rax
-; AVX2-NEXT: shlxq %r8, {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; AVX2-NEXT: movq (%rdi), %r10
-; AVX2-NEXT: andnq %r10, %rcx, %r15
-; AVX2-NEXT: andq %rcx, %r10
-; AVX2-NEXT: movq 40(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq 48(%rsp,%rsi), %r11
-; AVX2-NEXT: movq %r11, %r9
-; AVX2-NEXT: movl %r8d, %ecx
-; AVX2-NEXT: shldq %cl, %rdx, %r9
-; AVX2-NEXT: orq %r13, %r10
-; AVX2-NEXT: orq %r12, %r10
-; AVX2-NEXT: movq 8(%rdi), %r13
+; AVX2-NEXT: orq %rdx, %r14
+; AVX2-NEXT: andnq 32(%rdi), %rbx, %rdx
+; AVX2-NEXT: orq %rax, %rdx
+; AVX2-NEXT: shlxq %rcx, {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; AVX2-NEXT: movq -32(%rsp,%r10), %r10
+; AVX2-NEXT: shlxq %rcx, %r10, %rbx
+; AVX2-NEXT: # kill: def $cl killed $cl killed $rcx
+; AVX2-NEXT: shldq %cl, %r10, %r9
; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: andnq %r13, %rcx, %r12
-; AVX2-NEXT: andq %rcx, %r13
-; AVX2-NEXT: orq %rbp, %r13
-; AVX2-NEXT: orq %rax, %r13
-; AVX2-NEXT: movq 56(%rsp,%rsi), %rax
-; AVX2-NEXT: movl %r8d, %ecx
-; AVX2-NEXT: shldq %cl, %r11, %rax
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX2-NEXT: orq %r9, %r14
-; AVX2-NEXT: orq %rax, %rbx
-; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 24(%rsp,%rsi), %rax
-; AVX2-NEXT: movq 32(%rsp,%rsi), %r9
-; AVX2-NEXT: movq %r9, %r11
-; AVX2-NEXT: shldq %cl, %rax, %r11
-; AVX2-NEXT: shldq %cl, %r9, %rdx
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; AVX2-NEXT: orq %r11, %rbp
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX2-NEXT: orq %rdx, %rbx
-; AVX2-NEXT: movq 8(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq 16(%rsp,%rsi), %r9
-; AVX2-NEXT: movq %r9, %r11
-; AVX2-NEXT: shldq %cl, %rdx, %r11
-; AVX2-NEXT: shldq %cl, %r9, %rax
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; AVX2-NEXT: orq %r11, %r9
-; AVX2-NEXT: movq (%rsp,%rsi), %rsi
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; AVX2-NEXT: orq %rax, %r11
-; AVX2-NEXT: shlxq %r8, %rsi, %rax
-; AVX2-NEXT: shldq %cl, %rsi, %rdx
-; AVX2-NEXT: orq %rax, %r15
-; AVX2-NEXT: orq %rdx, %r12
-; AVX2-NEXT: orq %r10, %r13
-; AVX2-NEXT: movq %r14, 48(%rdi)
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: movq %rax, 56(%rdi)
-; AVX2-NEXT: movq %rbp, 32(%rdi)
-; AVX2-NEXT: movq %rbx, 40(%rdi)
-; AVX2-NEXT: movq %r9, 16(%rdi)
-; AVX2-NEXT: movq %r11, 24(%rdi)
-; AVX2-NEXT: movq %r15, (%rdi)
-; AVX2-NEXT: movq %r12, 8(%rdi)
-; AVX2-NEXT: sete %al
-; AVX2-NEXT: addq $200, %rsp
+; AVX2-NEXT: andnq 24(%rdi), %rcx, %rcx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX2-NEXT: andnq 16(%rdi), %r10, %r10
+; AVX2-NEXT: orq %r8, %rcx
+; AVX2-NEXT: orq %rsi, %r10
+; AVX2-NEXT: andnq 8(%rdi), %r15, %rsi
+; AVX2-NEXT: orq %r9, %rsi
+; AVX2-NEXT: andnq (%rdi), %rax, %rax
+; AVX2-NEXT: orq %rbx, %rax
+; AVX2-NEXT: andl $60, %r11d
+; AVX2-NEXT: movl (%rdi,%r11), %r8d
+; AVX2-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %r9d # 4-byte Reload
+; AVX2-NEXT: btl %r9d, %r8d
+; AVX2-NEXT: movq %r13, 56(%rdi)
+; AVX2-NEXT: movq %r12, 48(%rdi)
+; AVX2-NEXT: movq %r14, 40(%rdi)
+; AVX2-NEXT: movq %rdx, 32(%rdi)
+; AVX2-NEXT: movq %rcx, 24(%rdi)
+; AVX2-NEXT: movq %r10, 16(%rdi)
+; AVX2-NEXT: movq %rsi, 8(%rdi)
+; AVX2-NEXT: movq %rax, (%rdi)
+; AVX2-NEXT: setae %al
+; AVX2-NEXT: addq $168, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r12
; AVX2-NEXT: popq %r13
@@ -4121,131 +1527,100 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind {
; AVX512-NEXT: pushq %r13
; AVX512-NEXT: pushq %r12
; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: subq $184, %rsp
+; AVX512-NEXT: subq $152, %rsp
; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
; AVX512-NEXT: vmovaps {{.*#+}} xmm1 = [1,0,0,0]
; AVX512-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; AVX512-NEXT: movl %esi, %ecx
; AVX512-NEXT: andl $63, %ecx
-; AVX512-NEXT: shrl $3, %esi
-; AVX512-NEXT: andl $56, %esi
-; AVX512-NEXT: negl %esi
-; AVX512-NEXT: movslq %esi, %rsi
-; AVX512-NEXT: movq 128(%rsp,%rsi), %r10
-; AVX512-NEXT: movq 136(%rsp,%rsi), %r12
-; AVX512-NEXT: movq %r12, %rax
-; AVX512-NEXT: shldq %cl, %r10, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 160(%rsp,%rsi), %r14
-; AVX512-NEXT: movq 168(%rsp,%rsi), %rax
-; AVX512-NEXT: shldq %cl, %r14, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 144(%rsp,%rsi), %r15
-; AVX512-NEXT: movq 152(%rsp,%rsi), %r11
-; AVX512-NEXT: movq %r11, %rbx
-; AVX512-NEXT: shldq %cl, %r15, %rbx
-; AVX512-NEXT: movq 120(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rax, %r10
-; AVX512-NEXT: shldq %cl, %r11, %r14
-; AVX512-NEXT: movq %rdi, %r9
-; AVX512-NEXT: movq 112(%rsp,%rsi), %r11
-; AVX512-NEXT: shldq %cl, %r12, %r15
-; AVX512-NEXT: movl %edx, %edx
+; AVX512-NEXT: movl %esi, %r8d
+; AVX512-NEXT: shrl $3, %r8d
+; AVX512-NEXT: movl %r8d, %eax
+; AVX512-NEXT: andl $56, %eax
+; AVX512-NEXT: negl %eax
+; AVX512-NEXT: movslq %eax, %r9
+; AVX512-NEXT: movq 88(%rsp,%r9), %r10
+; AVX512-NEXT: movq 96(%rsp,%r9), %rax
+; AVX512-NEXT: movq %rax, %rsi
+; AVX512-NEXT: shldq %cl, %r10, %rsi
+; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq 104(%rsp,%r9), %rsi
+; AVX512-NEXT: movq %rsi, %r11
+; AVX512-NEXT: shldq %cl, %rax, %r11
+; AVX512-NEXT: movq 112(%rsp,%r9), %rax
+; AVX512-NEXT: movq %rax, %rbx
+; AVX512-NEXT: shldq %cl, %rsi, %rbx
+; AVX512-NEXT: movq 120(%rsp,%r9), %rsi
+; AVX512-NEXT: movq %rsi, %r14
+; AVX512-NEXT: shldq %cl, %rax, %r14
+; AVX512-NEXT: movq 128(%rsp,%r9), %rax
+; AVX512-NEXT: movq %rax, %r12
+; AVX512-NEXT: shldq %cl, %rsi, %r12
+; AVX512-NEXT: movq 136(%rsp,%r9), %r13
+; AVX512-NEXT: shldq %cl, %rax, %r13
+; AVX512-NEXT: movq 80(%rsp,%r9), %r15
+; AVX512-NEXT: shldq %cl, %r15, %r10
+; AVX512-NEXT: movl %edx, %eax
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vmovups %xmm1, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: vmovups %xmm1, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; AVX512-NEXT: movq %rdx, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; AVX512-NEXT: movq $0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: movq 16(%rdi), %r12
-; AVX512-NEXT: movq 48(%rdi), %r13
-; AVX512-NEXT: movq 32(%rdi), %rbp
-; AVX512-NEXT: andnq %rbp, %r15, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %r15, %rbp
-; AVX512-NEXT: andnq %r13, %r14, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %r14, %r13
-; AVX512-NEXT: andnq %r12, %r10, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq %r10, %r12
-; AVX512-NEXT: movq 40(%rdi), %r8
-; AVX512-NEXT: orq %r13, %r12
-; AVX512-NEXT: andnq %r8, %rbx, %rdi
-; AVX512-NEXT: andq %rbx, %r8
-; AVX512-NEXT: movq 56(%r9), %r13
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; AVX512-NEXT: andnq %r13, %rdx, %r10
-; AVX512-NEXT: andq %rdx, %r13
-; AVX512-NEXT: movq 24(%r9), %rax
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; AVX512-NEXT: andnq %rax, %rdx, %r15
-; AVX512-NEXT: andq %rdx, %rax
-; AVX512-NEXT: orq %r13, %rax
-; AVX512-NEXT: shlxq %rcx, %r11, %r13
-; AVX512-NEXT: movq (%r9), %rdx
-; AVX512-NEXT: andnq %rdx, %r13, %r14
-; AVX512-NEXT: andq %r13, %rdx
-; AVX512-NEXT: orq %rbp, %rdx
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r11, %rbp
-; AVX512-NEXT: orq %r12, %rdx
-; AVX512-NEXT: movq 8(%r9), %r13
-; AVX512-NEXT: andnq %r13, %rbp, %rbx
-; AVX512-NEXT: andq %rbp, %r13
-; AVX512-NEXT: orq %r8, %r13
-; AVX512-NEXT: movq 24(%rsp,%rsi), %r8
-; AVX512-NEXT: orq %rax, %r13
-; AVX512-NEXT: movq 32(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, %r12
-; AVX512-NEXT: shldq %cl, %r8, %r12
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; AVX512-NEXT: orq %r12, %r11
-; AVX512-NEXT: movq 40(%rsp,%rsi), %r12
-; AVX512-NEXT: shldq %cl, %rax, %r12
-; AVX512-NEXT: orq %r12, %r10
-; AVX512-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 8(%rsp,%rsi), %rax
-; AVX512-NEXT: movq 16(%rsp,%rsi), %r12
-; AVX512-NEXT: movq %r12, %rbp
-; AVX512-NEXT: shldq %cl, %rax, %rbp
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX512-NEXT: orq %rbp, %r10
-; AVX512-NEXT: shldq %cl, %r12, %r8
-; AVX512-NEXT: orq %r8, %rdi
-; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq -8(%rsp,%rsi), %r8
-; AVX512-NEXT: movq (%rsp,%rsi), %r12
-; AVX512-NEXT: movq %r12, %rbp
-; AVX512-NEXT: shldq %cl, %r8, %rbp
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; AVX512-NEXT: orq %rbp, %rdi
-; AVX512-NEXT: movq -16(%rsp,%rsi), %rsi
-; AVX512-NEXT: shldq %cl, %r12, %rax
+; AVX512-NEXT: movq (%rsp,%r9), %rbp
+; AVX512-NEXT: movq 8(%rsp,%r9), %rsi
+; AVX512-NEXT: shldq %cl, %rbp, %rsi
+; AVX512-NEXT: movq -8(%rsp,%r9), %rdx
+; AVX512-NEXT: shldq %cl, %rdx, %rbp
+; AVX512-NEXT: movq -16(%rsp,%r9), %rax
+; AVX512-NEXT: shldq %cl, %rax, %rdx
+; AVX512-NEXT: andnq 56(%rdi), %r13, %r13
+; AVX512-NEXT: andnq 48(%rdi), %r12, %r12
+; AVX512-NEXT: orq %rsi, %r13
+; AVX512-NEXT: orq %rbp, %r12
+; AVX512-NEXT: andnq 40(%rdi), %r14, %r14
+; AVX512-NEXT: orq %rdx, %r14
+; AVX512-NEXT: movq -24(%rsp,%r9), %rsi
+; AVX512-NEXT: shldq %cl, %rsi, %rax
+; AVX512-NEXT: andnq 32(%rdi), %rbx, %rdx
+; AVX512-NEXT: orq %rax, %rdx
+; AVX512-NEXT: movq -32(%rsp,%r9), %rax
+; AVX512-NEXT: shldq %cl, %rax, %rsi
+; AVX512-NEXT: shlxq %rcx, %r15, %rbx
+; AVX512-NEXT: andnq 24(%rdi), %r11, %r11
+; AVX512-NEXT: orq %rsi, %r11
+; AVX512-NEXT: movq -48(%rsp,%r9), %rsi
+; AVX512-NEXT: movq -40(%rsp,%r9), %r9
+; AVX512-NEXT: shldq %cl, %r9, %rax
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; AVX512-NEXT: andnq 16(%rdi), %r15, %r15
; AVX512-NEXT: orq %rax, %r15
; AVX512-NEXT: shlxq %rcx, %rsi, %rax
; AVX512-NEXT: # kill: def $cl killed $cl killed $rcx
-; AVX512-NEXT: shldq %cl, %rsi, %r8
-; AVX512-NEXT: orq %rax, %r14
-; AVX512-NEXT: orq %r8, %rbx
-; AVX512-NEXT: orq %rdx, %r13
-; AVX512-NEXT: movq %r11, 48(%r9)
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: movq %rax, 56(%r9)
-; AVX512-NEXT: movq %r10, 32(%r9)
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: movq %rax, 40(%r9)
-; AVX512-NEXT: movq %rdi, 16(%r9)
-; AVX512-NEXT: movq %r15, 24(%r9)
-; AVX512-NEXT: movq %r14, (%r9)
-; AVX512-NEXT: movq %rbx, 8(%r9)
-; AVX512-NEXT: sete %al
-; AVX512-NEXT: addq $184, %rsp
+; AVX512-NEXT: shldq %cl, %rsi, %r9
+; AVX512-NEXT: andnq 8(%rdi), %r10, %rcx
+; AVX512-NEXT: orq %r9, %rcx
+; AVX512-NEXT: andnq (%rdi), %rbx, %rsi
+; AVX512-NEXT: orq %rax, %rsi
+; AVX512-NEXT: andl $60, %r8d
+; AVX512-NEXT: movl (%rdi,%r8), %eax
+; AVX512-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 4-byte Reload
+; AVX512-NEXT: btl %r8d, %eax
+; AVX512-NEXT: movq %r13, 56(%rdi)
+; AVX512-NEXT: movq %r12, 48(%rdi)
+; AVX512-NEXT: movq %r14, 40(%rdi)
+; AVX512-NEXT: movq %rdx, 32(%rdi)
+; AVX512-NEXT: movq %r11, 24(%rdi)
+; AVX512-NEXT: movq %r15, 16(%rdi)
+; AVX512-NEXT: movq %rcx, 8(%rdi)
+; AVX512-NEXT: movq %rsi, (%rdi)
+; AVX512-NEXT: setae %al
+; AVX512-NEXT: addq $152, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r12
; AVX512-NEXT: popq %r13
@@ -4274,2749 +1649,25 @@ define i1 @init_eq_i512(ptr %word, i32 %position, i1 zeroext %value) nounwind {
define i1 @test_ne_i4096(ptr %word, i32 %position) nounwind {
; X86-LABEL: test_ne_i4096:
; X86: # %bb.0:
-; X86-NEXT: pushl %ebp
-; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: pushl %ebx
-; X86-NEXT: pushl %edi
-; X86-NEXT: pushl %esi
-; X86-NEXT: andl $-16, %esp
-; X86-NEXT: subl $1792, %esp # imm = 0x700
-; X86-NEXT: movl 12(%ebp), %ebx
-; X86-NEXT: movl %ebx, %ecx
-; X86-NEXT: shrl $3, %ecx
-; X86-NEXT: andl $508, %ecx # imm = 0x1FC
-; X86-NEXT: leal {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: subl %ecx, %esi
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $1, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; X86-NEXT: movl 248(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 252(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: andl $31, %ebx
-; X86-NEXT: movl %ebx, %ecx
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 504(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 508(%esi), %edx
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 120(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 124(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 376(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 380(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 184(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 188(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 440(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 444(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 56(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 60(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 312(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 316(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 216(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 220(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 472(%esi), %edi
-; X86-NEXT: movl 476(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 88(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 92(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 344(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 348(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 152(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 156(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 408(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 412(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 24(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 28(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 280(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 284(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 232(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 236(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 488(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 492(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 104(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 108(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 360(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 364(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 168(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 172(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 424(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 428(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 40(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 44(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 296(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 300(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 200(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 204(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 456(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 460(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 72(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 76(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 328(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 332(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 136(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 140(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 392(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 396(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 12(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 264(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 268(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 240(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 244(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 496(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 500(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 112(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 116(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 368(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 372(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 176(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 180(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 432(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 436(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 48(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 52(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 304(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 308(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 208(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 212(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 464(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 468(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 80(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 84(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 336(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 340(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 144(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 148(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 400(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 404(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 16(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 20(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 272(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 276(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 224(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 228(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 480(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 484(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 96(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 100(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 352(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 356(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 160(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 164(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 416(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 420(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 32(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 36(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 288(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 292(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 192(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 196(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 448(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 452(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 64(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 68(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 320(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 324(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 128(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 132(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %edi, %edx
-; X86-NEXT: movl 256(%esi), %edi
-; X86-NEXT: movl 260(%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: shldl %cl, %edi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl 388(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl 4(%esi), %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: shldl %cl, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shrdl $1, %eax, %edi
-; X86-NEXT: shrl %eax
-; X86-NEXT: movl %ebx, %edx
-; X86-NEXT: movl %eax, %ebx
-; X86-NEXT: notb %cl
-; X86-NEXT: shrdl %cl, %eax, %edi
-; X86-NEXT: shrl %cl, %ebx
-; X86-NEXT: movb $32, %cl
-; X86-NEXT: testb %cl, %cl
-; X86-NEXT: movl (%esi), %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl 8(%ebp), %eax
-; X86-NEXT: jne .LBB20_2
-; X86-NEXT: # %bb.1:
-; X86-NEXT: movl %edi, %ebx
-; X86-NEXT: .LBB20_2:
-; X86-NEXT: movl %edx, %ecx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shll %cl, %edx
-; X86-NEXT: orl %ebx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 320(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 64(%eax), %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 448(%eax), %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 192(%eax), %ecx
-; X86-NEXT: orl %edx, %ecx
-; X86-NEXT: orl %esi, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 288(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 32(%eax), %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 416(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 160(%eax), %edi
-; X86-NEXT: orl %ecx, %edi
-; X86-NEXT: orl %edx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 352(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 96(%eax), %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 480(%eax), %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 224(%eax), %ecx
-; X86-NEXT: orl %edx, %ecx
-; X86-NEXT: orl %esi, %ecx
-; X86-NEXT: orl %edi, %ecx
-; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 272(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 16(%eax), %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 400(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 144(%eax), %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: orl %edx, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 336(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 80(%eax), %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 464(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 208(%eax), %edi
-; X86-NEXT: orl %ecx, %edi
-; X86-NEXT: orl %edx, %edi
-; X86-NEXT: orl %esi, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 304(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 48(%eax), %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 432(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 176(%eax), %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: orl %edx, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 368(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 112(%eax), %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 496(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: andl 240(%eax), %ebx
-; X86-NEXT: orl %ecx, %ebx
-; X86-NEXT: orl %edx, %ebx
-; X86-NEXT: orl %esi, %ebx
-; X86-NEXT: orl %edi, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 264(%eax), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 8(%eax), %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl %eax, %ebx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 392(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 136(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: orl %edx, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 328(%ebx), %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 72(%ebx), %eax
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 456(%ebx), %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 200(%ebx), %esi
-; X86-NEXT: orl %edi, %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 296(%ebx), %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 40(%ebx), %eax
-; X86-NEXT: orl %edi, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 424(%ebx), %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 168(%ebx), %edx
-; X86-NEXT: orl %edi, %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 360(%ebx), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 104(%ebx), %eax
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 488(%ebx), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 232(%ebx), %edi
-; X86-NEXT: orl %ecx, %edi
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: orl %edx, %edi
-; X86-NEXT: orl %esi, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 280(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 24(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 408(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 152(%ebx), %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 344(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 88(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 472(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 216(%ebx), %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: orl %edx, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 312(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 56(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 440(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 184(%ebx), %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 376(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 120(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 504(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 248(%ebx), %edi
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: orl %ecx, %edi
-; X86-NEXT: orl %edx, %edi
-; X86-NEXT: orl %esi, %edi
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 324(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 68(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 452(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 196(%ebx), %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 292(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 36(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 420(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 164(%ebx), %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 356(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 100(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 484(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 228(%ebx), %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: orl %edx, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 276(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 20(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 404(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 148(%ebx), %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 340(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 84(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 468(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 212(%ebx), %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: orl %edx, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 308(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 52(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 436(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 180(%ebx), %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 372(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 116(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 500(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 244(%ebx), %edi
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: orl %ecx, %edi
-; X86-NEXT: orl %edx, %edi
-; X86-NEXT: orl %esi, %edi
-; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 268(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 12(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 396(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 140(%ebx), %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 332(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 76(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 460(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 204(%ebx), %edi
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: orl %ecx, %edi
-; X86-NEXT: orl %edx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 300(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 44(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 428(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 172(%ebx), %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 364(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 108(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 492(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: andl 236(%ebx), %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: orl %edx, %esi
-; X86-NEXT: orl %edi, %esi
-; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 284(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 28(%ebx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 412(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 156(%ebx), %edi
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: orl %ecx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 348(%ebx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 92(%ebx), %edx
-; X86-NEXT: orl %eax, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 476(%ebx), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 220(%ebx), %eax
-; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: orl %edx, %eax
-; X86-NEXT: orl %edi, %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 316(%ebx), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 60(%ebx), %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 444(%ebx), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT: andl 188(%ebx), %edi
-; X86-NEXT: orl %ecx, %edi
-; X86-NEXT: orl %edx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 380(%ebx), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: andl 124(%ebx), %edx
-; X86-NEXT: orl %ecx, %edx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 508(%ebx), %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT: movl 8(%ebp), %esi
-; X86-NEXT: andl 252(%esi), %ebx
-; X86-NEXT: orl %ecx, %ebx
-; X86-NEXT: orl %edx, %ebx
-; X86-NEXT: orl %edi, %ebx
-; X86-NEXT: orl %eax, %ebx
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: negl %ecx
-; X86-NEXT: movl 1648(%esp,%ecx), %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT: shldl %cl, %edi, %esi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: shldl %cl, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT: shldl %cl, %edx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movl 8(%ebp), %edx
-; X86-NEXT: andl 128(%edx), %ecx
-; X86-NEXT: andl 384(%edx), %edi
-; X86-NEXT: orl %ecx, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; X86-NEXT: shll %cl, %eax
-; X86-NEXT: andl (%edx), %eax
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 256(%edx), %eax
-; X86-NEXT: orl %eax, %edi
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 260(%edx), %eax
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: andl 4(%edx), %ecx
-; X86-NEXT: orl %eax, %ecx
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT: andl 132(%edx), %eax
-; X86-NEXT: andl 388(%edx), %esi
-; X86-NEXT: orl %eax, %esi
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-NEXT: orl %ebx, %esi
-; X86-NEXT: orl %edi, %esi
-; X86-NEXT: setne %al
-; X86-NEXT: leal -12(%ebp), %esp
-; X86-NEXT: popl %esi
-; X86-NEXT: popl %edi
-; X86-NEXT: popl %ebx
-; X86-NEXT: popl %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: andl $4064, %edx # imm = 0xFE0
+; X86-NEXT: shrl $3, %edx
+; X86-NEXT: movl (%eax,%edx), %eax
+; X86-NEXT: btl %ecx, %eax
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
-; SSE-LABEL: test_ne_i4096:
-; SSE: # %bb.0:
-; SSE-NEXT: pushq %rbp
-; SSE-NEXT: pushq %r15
-; SSE-NEXT: pushq %r14
-; SSE-NEXT: pushq %r13
-; SSE-NEXT: pushq %r12
-; SSE-NEXT: pushq %rbx
-; SSE-NEXT: subq $1576, %rsp # imm = 0x628
-; SSE-NEXT: movl %esi, %ecx
-; SSE-NEXT: movl %esi, %eax
-; SSE-NEXT: andl $4032, %eax # imm = 0xFC0
-; SSE-NEXT: xorps %xmm0, %xmm0
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movups %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $0, {{[0-9]+}}(%rsp)
-; SSE-NEXT: movq $1, {{[0-9]+}}(%rsp)
-; SSE-NEXT: andl $63, %ecx
-; SSE-NEXT: shrl $3, %eax
-; SSE-NEXT: negl %eax
-; SSE-NEXT: movslq %eax, %rsi
-; SSE-NEXT: movq 1296(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1304(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1552(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1560(%rsp,%rsi), %rax
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1168(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1176(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1424(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1432(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1232(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1240(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1488(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1496(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1104(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1112(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1360(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, (%rsp) # 8-byte Spill
-; SSE-NEXT: movq 1368(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1264(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1272(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1520(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1528(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1136(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1144(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1392(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1400(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1200(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1208(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1456(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1464(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1072(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1080(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1328(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1336(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1280(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1288(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1536(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1544(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1152(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1160(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1408(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1416(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1216(%rsp,%rsi), %r11
-; SSE-NEXT: movq 1224(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %r11, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1472(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1480(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1088(%rsp,%rsi), %r9
-; SSE-NEXT: movq 1096(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %r9, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1344(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1352(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1248(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1256(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rax, %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1504(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1512(%rsp,%rsi), %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rdx, %rax
-; SSE-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1120(%rsp,%rsi), %rax
-; SSE-NEXT: movq 1128(%rsp,%rsi), %r8
-; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: shldq %cl, %rax, %r8
-; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1376(%rsp,%rsi), %r13
-; SSE-NEXT: movq 1384(%rsp,%rsi), %rbx
-; SSE-NEXT: movq %rbx, %r8
-; SSE-NEXT: shldq %cl, %r13, %r8
-; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1184(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1192(%rsp,%rsi), %r15
-; SSE-NEXT: movq %r15, %r14
-; SSE-NEXT: shldq %cl, %rdx, %r14
-; SSE-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1440(%rsp,%rsi), %r10
-; SSE-NEXT: movq 1448(%rsp,%rsi), %rdx
-; SSE-NEXT: movq %rdx, %r14
-; SSE-NEXT: shldq %cl, %r10, %r14
-; SSE-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1312(%rsp,%rsi), %r14
-; SSE-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq 1320(%rsp,%rsi), %rbp
-; SSE-NEXT: movq %rbp, %r12
-; SSE-NEXT: shldq %cl, %r14, %r12
-; SSE-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r8, (%rsp) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq 1064(%rsp,%rsi), %rbx
-; SSE-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %rbp, %r14
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: shldq %cl, %rdx, %r11
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r15, %rdx
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r15, %r9
-; SSE-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r15, %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r15, %rbp
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r15, %r9
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r15, %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r15, %r13
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r12, %r15
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
-; SSE-NEXT: shldq %cl, %r12, %r10
-; SSE-NEXT: andq 384(%rdi), %r10
-; SSE-NEXT: andq 128(%rdi), %r15
-; SSE-NEXT: andq 320(%rdi), %r13
-; SSE-NEXT: andq 64(%rdi), %rax
-; SSE-NEXT: orq %r10, %r15
-; SSE-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: orq %r13, %rax
-; SSE-NEXT: andq 448(%rdi), %r9
-; SSE-NEXT: andq 192(%rdi), %rbp
-; SSE-NEXT: orq %r9, %rbp
-; SSE-NEXT: orq %rax, %rbp
-; SSE-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: andq 288(%rdi), %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; SSE-NEXT: andq 32(%rdi), %r9
-; SSE-NEXT: andq 416(%rdi), %rdx
-; SSE-NEXT: andq 160(%rdi), %r11
-; SSE-NEXT: orq %r8, %r9
-; SSE-NEXT: orq %rdx, %r11
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: andq 352(%rdi), %rdx
-; SSE-NEXT: orq %r9, %r11
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 96(%rdi), %rax
-; SSE-NEXT: orq %rdx, %rax
-; SSE-NEXT: movq %rax, %rdx
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 480(%rdi), %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: andq 224(%rdi), %r8
-; SSE-NEXT: orq %rax, %r8
-; SSE-NEXT: orq %rdx, %r8
-; SSE-NEXT: andq 272(%rdi), %r14
-; SSE-NEXT: orq %r11, %r8
-; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 16(%rdi), %rax
-; SSE-NEXT: orq %r14, %rax
-; SSE-NEXT: movq %rax, %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: andq 400(%rdi), %rdx
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 144(%rdi), %rax
-; SSE-NEXT: orq %rdx, %rax
-; SSE-NEXT: orq %r8, %rax
-; SSE-NEXT: movq %rax, %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; SSE-NEXT: andq 336(%rdi), %r9
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 80(%rdi), %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: andq 464(%rdi), %rdx
-; SSE-NEXT: orq %r9, %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; SSE-NEXT: andq 208(%rdi), %r11
-; SSE-NEXT: orq %rdx, %r11
-; SSE-NEXT: orq %rax, %r11
-; SSE-NEXT: orq %r8, %r11
-; SSE-NEXT: movq (%rsp), %rdx # 8-byte Reload
-; SSE-NEXT: andq 304(%rdi), %rdx
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 48(%rdi), %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; SSE-NEXT: andq 432(%rdi), %r9
-; SSE-NEXT: orq %rdx, %rax
-; SSE-NEXT: movq %rax, %r10
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: andq 176(%rdi), %r8
-; SSE-NEXT: orq %r9, %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; SSE-NEXT: andq 368(%rdi), %r9
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 112(%rdi), %rax
-; SSE-NEXT: orq %r10, %r8
-; SSE-NEXT: movq %r8, %r10
-; SSE-NEXT: orq %r9, %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: andq 496(%rdi), %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; SSE-NEXT: andq 240(%rdi), %rbp
-; SSE-NEXT: orq %r8, %rbp
-; SSE-NEXT: orq %rax, %rbp
-; SSE-NEXT: orq %r10, %rbp
-; SSE-NEXT: orq %r11, %rbp
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 392(%rdi), %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
-; SSE-NEXT: andq 136(%rdi), %r12
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: andq 328(%rdi), %rdx
-; SSE-NEXT: orq %rax, %r12
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 72(%rdi), %rax
-; SSE-NEXT: orq %rdx, %rax
-; SSE-NEXT: movq %rax, %rdx
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 456(%rdi), %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; SSE-NEXT: andq 200(%rdi), %r13
-; SSE-NEXT: orq %rax, %r13
-; SSE-NEXT: orq %rdx, %r13
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: andq 296(%rdi), %rdx
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 40(%rdi), %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: andq 424(%rdi), %r8
-; SSE-NEXT: orq %rdx, %rax
-; SSE-NEXT: movq %rax, %r9
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: andq 168(%rdi), %rdx
-; SSE-NEXT: orq %r8, %rdx
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: andq 360(%rdi), %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 104(%rdi), %rax
-; SSE-NEXT: orq %r9, %rdx
-; SSE-NEXT: orq %r8, %rax
-; SSE-NEXT: movq %rax, %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 488(%rdi), %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE-NEXT: andq 232(%rdi), %r15
-; SSE-NEXT: orq %rax, %r15
-; SSE-NEXT: orq %r8, %r15
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: andq 280(%rdi), %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 24(%rdi), %rax
-; SSE-NEXT: orq %rdx, %r15
-; SSE-NEXT: orq %r8, %rax
-; SSE-NEXT: movq %rax, %r10
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: andq 408(%rdi), %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 152(%rdi), %rax
-; SSE-NEXT: orq %r8, %rax
-; SSE-NEXT: orq %r10, %rax
-; SSE-NEXT: movq %rax, %r10
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; SSE-NEXT: andq 344(%rdi), %r11
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: andq 88(%rdi), %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 472(%rdi), %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; SSE-NEXT: andq 216(%rdi), %r14
-; SSE-NEXT: orq %r11, %r8
-; SSE-NEXT: orq %rax, %r14
-; SSE-NEXT: orq %r8, %r14
-; SSE-NEXT: orq %r10, %r14
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; SSE-NEXT: andq 312(%rdi), %r11
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; SSE-NEXT: andq 56(%rdi), %r10
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: andq 440(%rdi), %r8
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; SSE-NEXT: andq 184(%rdi), %r9
-; SSE-NEXT: orq %r11, %r10
-; SSE-NEXT: orq %r8, %r9
-; SSE-NEXT: orq %r10, %r9
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE-NEXT: shldq %cl, %rax, %rdx
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; SSE-NEXT: andq 376(%rdi), %r10
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE-NEXT: andq 120(%rdi), %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; SSE-NEXT: andq 504(%rdi), %r11
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE-NEXT: andq 248(%rdi), %r8
-; SSE-NEXT: orq %r10, %rax
-; SSE-NEXT: movq %rax, %r10
-; SSE-NEXT: orq %r11, %r8
-; SSE-NEXT: movq 1056(%rsp,%rsi), %rax
-; SSE-NEXT: shldq %cl, %rax, %rbx
-; SSE-NEXT: # kill: def $cl killed $cl killed $ecx
-; SSE-NEXT: shlq %cl, %rax
-; SSE-NEXT: orq %r10, %r8
-; SSE-NEXT: orq %r9, %r8
-; SSE-NEXT: andq 256(%rdi), %rdx
-; SSE-NEXT: orq %r14, %r8
-; SSE-NEXT: andq (%rdi), %rax
-; SSE-NEXT: orq %rdx, %rax
-; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; SSE-NEXT: orq %rbp, %rax
-; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE-NEXT: andq 264(%rdi), %rcx
-; SSE-NEXT: andq 8(%rdi), %rbx
-; SSE-NEXT: orq %rcx, %rbx
-; SSE-NEXT: orq %r12, %rbx
-; SSE-NEXT: orq %r13, %rbx
-; SSE-NEXT: orq %r15, %rbx
-; SSE-NEXT: orq %r8, %rbx
-; SSE-NEXT: orq %rax, %rbx
-; SSE-NEXT: setne %al
-; SSE-NEXT: addq $1576, %rsp # imm = 0x628
-; SSE-NEXT: popq %rbx
-; SSE-NEXT: popq %r12
-; SSE-NEXT: popq %r13
-; SSE-NEXT: popq %r14
-; SSE-NEXT: popq %r15
-; SSE-NEXT: popq %rbp
-; SSE-NEXT: retq
-;
-; AVX2-LABEL: test_ne_i4096:
-; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: subq $1560, %rsp # imm = 0x618
-; AVX2-NEXT: movl %esi, %ecx
-; AVX2-NEXT: movl %esi, %eax
-; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: andl $4032, %eax # imm = 0xFC0
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: vmovss {{.*#+}} xmm0 = [1,0,0,0]
-; AVX2-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT: andl $63, %ecx
-; AVX2-NEXT: shrl $3, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: movslq %eax, %rsi
-; AVX2-NEXT: movq 1280(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1288(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1536(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1544(%rsp,%rsi), %rax
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1152(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1160(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1408(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1416(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1216(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, (%rsp) # 8-byte Spill
-; AVX2-NEXT: movq 1224(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1472(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1480(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1088(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1096(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1344(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1352(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1248(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1256(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1504(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1512(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1120(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1128(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1376(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1384(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1184(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1192(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1440(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1448(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1056(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1064(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1312(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1320(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1264(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1272(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1520(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1528(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1136(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1144(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1392(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1400(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1200(%rsp,%rsi), %r11
-; AVX2-NEXT: movq 1208(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %r11, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1456(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1464(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1072(%rsp,%rsi), %r12
-; AVX2-NEXT: movq 1080(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %r12, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1328(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1336(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rdx, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1232(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1240(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rax, %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1488(%rsp,%rsi), %rbp
-; AVX2-NEXT: movq 1496(%rsp,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rbp, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1104(%rsp,%rsi), %rax
-; AVX2-NEXT: movq 1112(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: shldq %cl, %rax, %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1360(%rsp,%rsi), %r10
-; AVX2-NEXT: movq 1368(%rsp,%rsi), %r8
-; AVX2-NEXT: movq %r8, %rdx
-; AVX2-NEXT: shldq %cl, %r10, %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1168(%rsp,%rsi), %r9
-; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1176(%rsp,%rsi), %rbx
-; AVX2-NEXT: movq %rbx, %rdx
-; AVX2-NEXT: shldq %cl, %r9, %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1424(%rsp,%rsi), %r9
-; AVX2-NEXT: movq 1432(%rsp,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, %r14
-; AVX2-NEXT: shldq %cl, %r9, %r14
-; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1296(%rsp,%rsi), %r15
-; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq 1304(%rsp,%rsi), %r14
-; AVX2-NEXT: movq %r14, %r13
-; AVX2-NEXT: shldq %cl, %r15, %r13
-; AVX2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, (%rsp) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq 1048(%rsp,%rsi), %rdx
-; AVX2-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: shldq %cl, %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r14, %rbx
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r14, %r11
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r14, %r12
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r14, %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r14, %r13
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r14, %rbp
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r14, %rax
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r14, %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, %r14
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r15, %r9
-; AVX2-NEXT: andq 384(%rdi), %r9
-; AVX2-NEXT: andq 128(%rdi), %r14
-; AVX2-NEXT: andq 320(%rdi), %r10
-; AVX2-NEXT: orq %r9, %r14
-; AVX2-NEXT: movq %r14, %r15
-; AVX2-NEXT: andq 64(%rdi), %rax
-; AVX2-NEXT: orq %r10, %rax
-; AVX2-NEXT: andq 448(%rdi), %rbp
-; AVX2-NEXT: andq 192(%rdi), %r13
-; AVX2-NEXT: orq %rbp, %r13
-; AVX2-NEXT: orq %rax, %r13
-; AVX2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: andq 288(%rdi), %r8
-; AVX2-NEXT: andq 32(%rdi), %r12
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 416(%rdi), %rax
-; AVX2-NEXT: orq %r8, %r12
-; AVX2-NEXT: andq 160(%rdi), %r11
-; AVX2-NEXT: orq %rax, %r11
-; AVX2-NEXT: andq 352(%rdi), %rbx
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 96(%rdi), %rax
-; AVX2-NEXT: orq %r12, %r11
-; AVX2-NEXT: orq %rbx, %rax
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX2-NEXT: andq 480(%rdi), %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX2-NEXT: andq 224(%rdi), %r13
-; AVX2-NEXT: orq %r10, %r13
-; AVX2-NEXT: orq %rax, %r13
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 272(%rdi), %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 16(%rdi), %rax
-; AVX2-NEXT: orq %r11, %r13
-; AVX2-NEXT: orq %r8, %rax
-; AVX2-NEXT: movq %rax, %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; AVX2-NEXT: andq 400(%rdi), %r9
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 144(%rdi), %rax
-; AVX2-NEXT: orq %r9, %rax
-; AVX2-NEXT: orq %r8, %rax
-; AVX2-NEXT: movq %rax, %r9
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX2-NEXT: andq 336(%rdi), %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 80(%rdi), %rax
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 464(%rdi), %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; AVX2-NEXT: andq 208(%rdi), %r11
-; AVX2-NEXT: orq %r10, %rax
-; AVX2-NEXT: orq %r8, %r11
-; AVX2-NEXT: orq %rax, %r11
-; AVX2-NEXT: orq %r9, %r11
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; AVX2-NEXT: andq 304(%rdi), %r9
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 48(%rdi), %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX2-NEXT: andq 432(%rdi), %r10
-; AVX2-NEXT: movq (%rsp), %rax # 8-byte Reload
-; AVX2-NEXT: andq 176(%rdi), %rax
-; AVX2-NEXT: orq %r9, %r8
-; AVX2-NEXT: movq %r8, %r9
-; AVX2-NEXT: orq %r10, %rax
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 368(%rdi), %r8
-; AVX2-NEXT: orq %r9, %rax
-; AVX2-NEXT: movq %rax, %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 112(%rdi), %rax
-; AVX2-NEXT: orq %r8, %rax
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 496(%rdi), %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; AVX2-NEXT: andq 240(%rdi), %r9
-; AVX2-NEXT: orq %r8, %r9
-; AVX2-NEXT: orq %rax, %r9
-; AVX2-NEXT: orq %r10, %r9
-; AVX2-NEXT: orq %r11, %r9
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX2-NEXT: andq 392(%rdi), %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; AVX2-NEXT: andq 136(%rdi), %rbp
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 328(%rdi), %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 72(%rdi), %rax
-; AVX2-NEXT: orq %r10, %rbp
-; AVX2-NEXT: orq %r8, %rax
-; AVX2-NEXT: movq %rax, %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 456(%rdi), %rax
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
-; AVX2-NEXT: andq 200(%rdi), %r12
-; AVX2-NEXT: orq %rax, %r12
-; AVX2-NEXT: orq %r8, %r12
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX2-NEXT: andq 296(%rdi), %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 40(%rdi), %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; AVX2-NEXT: andq 424(%rdi), %r11
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 168(%rdi), %rax
-; AVX2-NEXT: orq %r10, %r8
-; AVX2-NEXT: movq %r8, %r10
-; AVX2-NEXT: orq %r11, %rax
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 360(%rdi), %r8
-; AVX2-NEXT: orq %r10, %rax
-; AVX2-NEXT: movq %rax, %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 104(%rdi), %rax
-; AVX2-NEXT: orq %r8, %rax
-; AVX2-NEXT: movq %rax, %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 488(%rdi), %rax
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX2-NEXT: andq 232(%rdi), %r14
-; AVX2-NEXT: orq %rax, %r14
-; AVX2-NEXT: orq %r8, %r14
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 280(%rdi), %r8
-; AVX2-NEXT: orq %r10, %r14
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 24(%rdi), %rax
-; AVX2-NEXT: orq %r8, %rax
-; AVX2-NEXT: movq %rax, %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 408(%rdi), %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 152(%rdi), %rax
-; AVX2-NEXT: orq %r8, %rax
-; AVX2-NEXT: orq %r10, %rax
-; AVX2-NEXT: movq %rax, %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; AVX2-NEXT: andq 344(%rdi), %r11
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 88(%rdi), %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 472(%rdi), %rax
-; AVX2-NEXT: orq %r11, %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX2-NEXT: andq 216(%rdi), %rbx
-; AVX2-NEXT: orq %rax, %rbx
-; AVX2-NEXT: orq %r8, %rbx
-; AVX2-NEXT: orq %r10, %rbx
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 312(%rdi), %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 56(%rdi), %rax
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX2-NEXT: andq 440(%rdi), %r10
-; AVX2-NEXT: orq %r8, %rax
-; AVX2-NEXT: movq %rax, %r11
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 184(%rdi), %r8
-; AVX2-NEXT: orq %r10, %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX2-NEXT: andq 376(%rdi), %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 120(%rdi), %rax
-; AVX2-NEXT: orq %r11, %r8
-; AVX2-NEXT: movq %r8, %r11
-; AVX2-NEXT: orq %r10, %rax
-; AVX2-NEXT: movq %rax, %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: andq 504(%rdi), %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: andq 248(%rdi), %rax
-; AVX2-NEXT: orq %r8, %rax
-; AVX2-NEXT: orq %r10, %rax
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX2-NEXT: shldq %cl, %r8, %r10
-; AVX2-NEXT: orq %r11, %rax
-; AVX2-NEXT: movq 1040(%rsp,%rsi), %rsi
-; AVX2-NEXT: orq %rbx, %rax
-; AVX2-NEXT: movq %rax, %r8
-; AVX2-NEXT: shlxq %rcx, %rsi, %rax
-; AVX2-NEXT: andq 256(%rdi), %r10
-; AVX2-NEXT: andq (%rdi), %rax
-; AVX2-NEXT: orq %r10, %rax
-; AVX2-NEXT: orq %r15, %rax
-; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; AVX2-NEXT: orq %r13, %rax
-; AVX2-NEXT: # kill: def $cl killed $cl killed $rcx
-; AVX2-NEXT: shldq %cl, %rsi, %rdx
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: andq 264(%rdi), %rcx
-; AVX2-NEXT: andq 8(%rdi), %rdx
-; AVX2-NEXT: orq %r9, %rax
-; AVX2-NEXT: orq %rcx, %rdx
-; AVX2-NEXT: orq %rbp, %rdx
-; AVX2-NEXT: orq %r12, %rdx
-; AVX2-NEXT: orq %r14, %rdx
-; AVX2-NEXT: orq %r8, %rdx
-; AVX2-NEXT: orq %rax, %rdx
-; AVX2-NEXT: setne %al
-; AVX2-NEXT: addq $1560, %rsp # imm = 0x618
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: test_ne_i4096:
-; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %r13
-; AVX512-NEXT: pushq %r12
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: subq $1560, %rsp # imm = 0x618
-; AVX512-NEXT: movl %esi, %ecx
-; AVX512-NEXT: movl %esi, %eax
-; AVX512-NEXT: andl $4032, %eax # imm = 0xFC0
-; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: vmovaps {{.*#+}} xmm0 = [1,0,0,0]
-; AVX512-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp)
-; AVX512-NEXT: andl $63, %ecx
-; AVX512-NEXT: shrl $3, %eax
-; AVX512-NEXT: negl %eax
-; AVX512-NEXT: movslq %eax, %rsi
-; AVX512-NEXT: movq 1280(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1288(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1536(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1544(%rsp,%rsi), %rax
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1152(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1160(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1408(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1416(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1216(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, (%rsp) # 8-byte Spill
-; AVX512-NEXT: movq 1224(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1472(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1480(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1088(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1096(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1344(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1352(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1248(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1256(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1504(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1512(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1120(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1128(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1376(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1384(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1184(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1192(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1440(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1448(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1056(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1064(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1312(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1320(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1264(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1272(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1520(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1528(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1136(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1144(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1392(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1400(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1200(%rsp,%rsi), %r10
-; AVX512-NEXT: movq 1208(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %r10, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1456(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1464(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1072(%rsp,%rsi), %r14
-; AVX512-NEXT: movq 1080(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %r14, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1328(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1336(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rdx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1232(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1240(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rax, %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1488(%rsp,%rsi), %r12
-; AVX512-NEXT: movq 1496(%rsp,%rsi), %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %r12, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1104(%rsp,%rsi), %rax
-; AVX512-NEXT: movq 1112(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: shldq %cl, %rax, %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1360(%rsp,%rsi), %r11
-; AVX512-NEXT: movq 1368(%rsp,%rsi), %rbx
-; AVX512-NEXT: movq %rbx, %rdx
-; AVX512-NEXT: shldq %cl, %r11, %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1168(%rsp,%rsi), %r9
-; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1176(%rsp,%rsi), %r8
-; AVX512-NEXT: movq %r8, %rdx
-; AVX512-NEXT: shldq %cl, %r9, %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1424(%rsp,%rsi), %r9
-; AVX512-NEXT: movq 1432(%rsp,%rsi), %rdx
-; AVX512-NEXT: movq %rdx, %r15
-; AVX512-NEXT: shldq %cl, %r9, %r15
-; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1296(%rsp,%rsi), %rbp
-; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq 1304(%rsp,%rsi), %r15
-; AVX512-NEXT: movq %r15, %r13
-; AVX512-NEXT: shldq %cl, %rbp, %r13
-; AVX512-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r13, (%rsp) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: shldq %cl, %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq 1048(%rsp,%rsi), %rdx
-; AVX512-NEXT: shldq %cl, %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r15, %rbx
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r15, %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r15, %r14
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r15, %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r15, %r13
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r15, %r12
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r15, %rax
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %r15, %r11
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %rbp, %r15
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %rbp, %r9
-; AVX512-NEXT: andq 384(%rdi), %r9
-; AVX512-NEXT: andq 128(%rdi), %r15
-; AVX512-NEXT: orq %r9, %r15
-; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: andq 320(%rdi), %r11
-; AVX512-NEXT: andq 64(%rdi), %rax
-; AVX512-NEXT: orq %r11, %rax
-; AVX512-NEXT: andq 448(%rdi), %r12
-; AVX512-NEXT: andq 192(%rdi), %r13
-; AVX512-NEXT: orq %r12, %r13
-; AVX512-NEXT: orq %rax, %r13
-; AVX512-NEXT: andq 288(%rdi), %r8
-; AVX512-NEXT: andq 32(%rdi), %r14
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 416(%rdi), %rax
-; AVX512-NEXT: orq %r8, %r14
-; AVX512-NEXT: andq 160(%rdi), %r10
-; AVX512-NEXT: orq %rax, %r10
-; AVX512-NEXT: andq 352(%rdi), %rbx
-; AVX512-NEXT: orq %r14, %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 96(%rdi), %rax
-; AVX512-NEXT: orq %rbx, %rax
-; AVX512-NEXT: movq %rax, %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 480(%rdi), %rax
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT: andq 224(%rdi), %r15
-; AVX512-NEXT: orq %rax, %r15
-; AVX512-NEXT: orq %r8, %r15
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 272(%rdi), %r8
-; AVX512-NEXT: orq %r10, %r15
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 16(%rdi), %rax
-; AVX512-NEXT: orq %r8, %rax
-; AVX512-NEXT: movq %rax, %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; AVX512-NEXT: andq 400(%rdi), %r9
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 144(%rdi), %rax
-; AVX512-NEXT: orq %r9, %rax
-; AVX512-NEXT: orq %r8, %rax
-; AVX512-NEXT: movq %rax, %r9
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX512-NEXT: andq 336(%rdi), %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 80(%rdi), %rax
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 464(%rdi), %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; AVX512-NEXT: andq 208(%rdi), %r11
-; AVX512-NEXT: orq %r10, %rax
-; AVX512-NEXT: orq %r8, %r11
-; AVX512-NEXT: orq %rax, %r11
-; AVX512-NEXT: orq %r9, %r11
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX512-NEXT: andq 304(%rdi), %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 48(%rdi), %rax
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; AVX512-NEXT: andq 432(%rdi), %r9
-; AVX512-NEXT: movq (%rsp), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 176(%rdi), %r8
-; AVX512-NEXT: orq %r10, %rax
-; AVX512-NEXT: movq %rax, %r10
-; AVX512-NEXT: orq %r9, %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; AVX512-NEXT: andq 368(%rdi), %r9
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 112(%rdi), %rax
-; AVX512-NEXT: orq %r10, %r8
-; AVX512-NEXT: movq %r8, %r10
-; AVX512-NEXT: orq %r9, %rax
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 496(%rdi), %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; AVX512-NEXT: andq 240(%rdi), %r9
-; AVX512-NEXT: orq %r8, %r9
-; AVX512-NEXT: orq %rax, %r9
-; AVX512-NEXT: orq %r10, %r9
-; AVX512-NEXT: orq %r11, %r9
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX512-NEXT: andq 392(%rdi), %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; AVX512-NEXT: andq 136(%rdi), %rbp
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 328(%rdi), %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 72(%rdi), %rax
-; AVX512-NEXT: orq %r10, %rbp
-; AVX512-NEXT: orq %r8, %rax
-; AVX512-NEXT: movq %rax, %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 456(%rdi), %rax
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
-; AVX512-NEXT: andq 200(%rdi), %r12
-; AVX512-NEXT: orq %rax, %r12
-; AVX512-NEXT: orq %r8, %r12
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 296(%rdi), %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 40(%rdi), %rax
-; AVX512-NEXT: orq %r8, %rax
-; AVX512-NEXT: movq %rax, %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 424(%rdi), %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 168(%rdi), %rax
-; AVX512-NEXT: orq %r8, %rax
-; AVX512-NEXT: orq %r10, %rax
-; AVX512-NEXT: movq %rax, %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 360(%rdi), %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 104(%rdi), %rax
-; AVX512-NEXT: orq %r8, %rax
-; AVX512-NEXT: movq %rax, %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 488(%rdi), %rax
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX512-NEXT: andq 232(%rdi), %r14
-; AVX512-NEXT: orq %rax, %r14
-; AVX512-NEXT: orq %r8, %r14
-; AVX512-NEXT: orq %r10, %r14
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 280(%rdi), %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 24(%rdi), %rax
-; AVX512-NEXT: orq %r8, %rax
-; AVX512-NEXT: movq %rax, %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 408(%rdi), %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 152(%rdi), %rax
-; AVX512-NEXT: orq %r8, %rax
-; AVX512-NEXT: orq %r10, %rax
-; AVX512-NEXT: movq %rax, %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; AVX512-NEXT: andq 344(%rdi), %r11
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 88(%rdi), %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 472(%rdi), %rax
-; AVX512-NEXT: orq %r11, %r8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX512-NEXT: andq 216(%rdi), %rbx
-; AVX512-NEXT: orq %rax, %rbx
-; AVX512-NEXT: orq %r8, %rbx
-; AVX512-NEXT: orq %r10, %rbx
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX512-NEXT: andq 312(%rdi), %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 56(%rdi), %rax
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 440(%rdi), %r8
-; AVX512-NEXT: orq %r10, %rax
-; AVX512-NEXT: movq %rax, %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 184(%rdi), %rax
-; AVX512-NEXT: orq %r8, %rax
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 376(%rdi), %r8
-; AVX512-NEXT: orq %r10, %rax
-; AVX512-NEXT: movq %rax, %r11
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 120(%rdi), %rax
-; AVX512-NEXT: orq %r8, %rax
-; AVX512-NEXT: movq %rax, %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 504(%rdi), %rax
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: andq 248(%rdi), %r8
-; AVX512-NEXT: orq %rax, %r8
-; AVX512-NEXT: orq %r10, %r8
-; AVX512-NEXT: orq %r11, %r8
-; AVX512-NEXT: movq 1040(%rsp,%rsi), %rax
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX512-NEXT: shldq %cl, %rsi, %r10
-; AVX512-NEXT: orq %rbx, %r8
-; AVX512-NEXT: shlxq %rcx, %rax, %rsi
-; AVX512-NEXT: andq 256(%rdi), %r10
-; AVX512-NEXT: andq (%rdi), %rsi
-; AVX512-NEXT: orq %r10, %rsi
-; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX512-NEXT: orq %r13, %rsi
-; AVX512-NEXT: orq %r15, %rsi
-; AVX512-NEXT: # kill: def $cl killed $cl killed $rcx
-; AVX512-NEXT: shldq %cl, %rax, %rdx
-; AVX512-NEXT: orq %r9, %rsi
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: andq 264(%rdi), %rax
-; AVX512-NEXT: andq 8(%rdi), %rdx
-; AVX512-NEXT: orq %rax, %rdx
-; AVX512-NEXT: orq %rbp, %rdx
-; AVX512-NEXT: orq %r12, %rdx
-; AVX512-NEXT: orq %r14, %rdx
-; AVX512-NEXT: orq %r8, %rdx
-; AVX512-NEXT: orq %rsi, %rdx
-; AVX512-NEXT: setne %al
-; AVX512-NEXT: addq $1560, %rsp # imm = 0x618
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; X64-LABEL: test_ne_i4096:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %eax
+; X64-NEXT: andl $4064, %eax # imm = 0xFE0
+; X64-NEXT: shrl $3, %eax
+; X64-NEXT: movl (%rdi,%rax), %eax
+; X64-NEXT: btl %esi, %eax
+; X64-NEXT: setb %al
+; X64-NEXT: retq
%rem = and i32 %position, 4095
%ofs = zext nneg i32 %rem to i4096
%bit = shl nuw i4096 1, %ofs
diff --git a/llvm/test/CodeGen/X86/ldexp-avx512.ll b/llvm/test/CodeGen/X86/ldexp-avx512.ll
new file mode 100644
index 0000000..ea93a91
--- /dev/null
+++ b/llvm/test/CodeGen/X86/ldexp-avx512.ll
@@ -0,0 +1,467 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,AVX512
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512VL
+
+define half @test_half(half %x, i32 %exp) nounwind {
+; CHECK-LABEL: test_half:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: vcvtph2ps %xmm0, %xmm0
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
+entry:
+ %r = tail call fast half @llvm.ldexp.f16.i32(half %x, i32 %exp)
+ ret half %r
+}
+declare half @llvm.ldexp.f16.i32(half, i32) memory(none)
+
+define float @test_float(float %x, i32 %exp) nounwind {
+; CHECK-LABEL: test_float:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: jmp ldexpf@PLT # TAILCALL
+entry:
+ %r = tail call fast float @ldexpf(float %x, i32 %exp)
+ ret float %r
+}
+declare float @ldexpf(float, i32) memory(none)
+
+define double @test_double(double %x, i32 %exp) nounwind {
+; CHECK-LABEL: test_double:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: jmp ldexp@PLT # TAILCALL
+entry:
+ %r = tail call fast double @ldexp(double %x, i32 %exp)
+ ret double %r
+}
+declare double @ldexp(double, i32) memory(none)
+
+define fp128 @testExpl(fp128 %x, i32 %exp) nounwind {
+; CHECK-LABEL: testExpl:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: jmp ldexpl@PLT # TAILCALL
+entry:
+ %r = tail call fast fp128 @ldexpl(fp128 %x, i32 %exp)
+ ret fp128 %r
+}
+declare fp128 @ldexpl(fp128, i32) memory(none)
+
+define <4 x float> @test_ldexp_4xfloat(<4 x float> %x, <4 x i32> %exp) nounwind {
+; CHECK-LABEL: test_ldexp_4xfloat:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $56, %rsp
+; CHECK-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovd %xmm1, %edi
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: addq $56, %rsp
+; CHECK-NEXT: retq
+ %r = call <4 x float> @llvm.ldexp.v4f32.v4i32(<4 x float> %x, <4 x i32> %exp)
+ ret <4 x float> %r
+}
+declare <4 x float> @llvm.ldexp.v4f32.v4i32(<4 x float>, <4 x i32>)
+
+define <2 x double> @test_ldexp_2xdouble(<2 x double> %x, <2 x i32> %exp) nounwind {
+; CHECK-LABEL: test_ldexp_2xdouble:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $56, %rsp
+; CHECK-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovd %xmm1, %edi
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: addq $56, %rsp
+; CHECK-NEXT: retq
+ %r = call <2 x double> @llvm.ldexp.v2f64.v2i32(<2 x double> %x, <2 x i32> %exp)
+ ret <2 x double> %r
+}
+declare <2 x double> @llvm.ldexp.v2f64.v2i32(<2 x double>, <2 x i32>)
+
+define <8 x float> @test_ldexp_8xfloat(<8 x float> %x, <8 x i32> %exp) nounwind {
+; CHECK-LABEL: test_ldexp_8xfloat:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $120, %rsp
+; CHECK-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm1
+; CHECK-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovd %xmm1, %edi
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: addq $120, %rsp
+; CHECK-NEXT: retq
+ %r = call <8 x float> @llvm.ldexp.v8f32.v8i32(<8 x float> %x, <8 x i32> %exp)
+ ret <8 x float> %r
+}
+declare <8 x float> @llvm.ldexp.v8f32.v8i32(<8 x float>, <8 x i32>)
+
+define <4 x double> @test_ldexp_4xdouble(<4 x double> %x, <4 x i32> %exp) nounwind {
+; CHECK-LABEL: test_ldexp_4xdouble:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $88, %rsp
+; CHECK-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vextractps $2, %xmm1, %edi
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: addq $88, %rsp
+; CHECK-NEXT: retq
+ %r = call <4 x double> @llvm.ldexp.v4f64.v4i32(<4 x double> %x, <4 x i32> %exp)
+ ret <4 x double> %r
+}
+declare <4 x double> @llvm.ldexp.v4f64.v4i32(<4 x double>, <4 x i32>)
+
+define <16 x float> @test_ldexp_16xfloat(<16 x float> %x, <16 x i32> %exp) nounwind {
+; CHECK-LABEL: test_ldexp_16xfloat:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $216, %rsp
+; CHECK-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; CHECK-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm0
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vextracti32x4 $3, %zmm1, %xmm1
+; CHECK-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovd %xmm1, %edi
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm1
+; CHECK-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
+; CHECK-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,1,3,3]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[3,3,3,3]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexpf@PLT
+; CHECK-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: vinsertf64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; CHECK-NEXT: addq $216, %rsp
+; CHECK-NEXT: retq
+ %r = call <16 x float> @llvm.ldexp.v16f32.v16i32(<16 x float> %x, <16 x i32> %exp)
+ ret <16 x float> %r
+}
+declare <16 x float> @llvm.ldexp.v16f32.v16i32(<16 x float>, <16 x i32>)
+
+define <8 x double> @test_ldexp_8xdouble(<8 x double> %x, <8 x i32> %exp) nounwind {
+; CHECK-LABEL: test_ldexp_8xdouble:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subq $184, %rsp
+; CHECK-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm0
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm1
+; CHECK-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vextractps $2, %xmm1, %edi
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm1
+; CHECK-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vextractps $2, %xmm0, %edi
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vextractps $3, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vmovd %xmm0, %edi
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT: vextractps $1, %xmm0, %edi
+; CHECK-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = mem[1,0]
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: callq ldexp@PLT
+; CHECK-NEXT: vmovapd (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; CHECK-NEXT: vinsertf64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; CHECK-NEXT: addq $184, %rsp
+; CHECK-NEXT: retq
+ %r = call <8 x double> @llvm.ldexp.v8f64.v8i32(<8 x double> %x, <8 x i32> %exp)
+ ret <8 x double> %r
+}
+declare <8 x double> @llvm.ldexp.v8f64.v8i32(<8 x double>, <8 x i32>)
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX512: {{.*}}
+; AVX512VL: {{.*}}
diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
index 9816fa7..044327d 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
@@ -875,28 +875,12 @@ define i1 @mask_v8i32(<8 x i32> %a0) {
; SSE41-NEXT: sete %al
; SSE41-NEXT: retq
;
-; AVX1-LABEL: mask_v8i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
-; AVX1-NEXT: sete %al
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: mask_v8i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372039002259456,9223372039002259456,9223372039002259456,9223372039002259456]
-; AVX2-NEXT: vptest %ymm1, %ymm0
-; AVX2-NEXT: sete %al
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: mask_v8i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372039002259456,9223372039002259456,9223372039002259456,9223372039002259456]
-; AVX512-NEXT: vptest %ymm1, %ymm0
-; AVX512-NEXT: sete %al
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX-LABEL: mask_v8i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vtestps %ymm0, %ymm0
+; AVX-NEXT: sete %al
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
%1 = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %a0)
%2 = and i32 %1, 2147483648
%3 = icmp eq i32 %2, 0
@@ -965,28 +949,12 @@ define i1 @signtest_v8i32(<8 x i32> %a0) {
; SSE41-NEXT: sete %al
; SSE41-NEXT: retq
;
-; AVX1-LABEL: signtest_v8i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
-; AVX1-NEXT: sete %al
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: signtest_v8i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372039002259456,9223372039002259456,9223372039002259456,9223372039002259456]
-; AVX2-NEXT: vptest %ymm1, %ymm0
-; AVX2-NEXT: sete %al
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: signtest_v8i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372039002259456,9223372039002259456,9223372039002259456,9223372039002259456]
-; AVX512-NEXT: vptest %ymm1, %ymm0
-; AVX512-NEXT: sete %al
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX-LABEL: signtest_v8i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vtestps %ymm0, %ymm0
+; AVX-NEXT: sete %al
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
%1 = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %a0)
%2 = icmp sgt i32 %1, -1
ret i1 %2
@@ -1010,28 +978,12 @@ define i1 @signtest_v4i64(<4 x i64> %a0) {
; SSE41-NEXT: sete %al
; SSE41-NEXT: retq
;
-; AVX1-LABEL: signtest_v4i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0
-; AVX1-NEXT: sete %al
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: signtest_v4i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX2-NEXT: vptest %ymm1, %ymm0
-; AVX2-NEXT: sete %al
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: signtest_v4i64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpbroadcastq {{.*#+}} ymm1 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX512-NEXT: vptest %ymm1, %ymm0
-; AVX512-NEXT: sete %al
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX-LABEL: signtest_v4i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vtestpd %ymm0, %ymm0
+; AVX-NEXT: sete %al
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
%1 = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %a0)
%2 = icmp sgt i64 %1, -1
ret i1 %2
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop1.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop1.s
index d85ea79..399a644 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_vop1.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop1.s
@@ -1,8 +1,8 @@
// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --unique --version 5
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -show-encoding -comment-column=0 %s | FileCheck --strict-whitespace --check-prefixes=GFX12,GFX12-ASM %s
-// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -show-encoding %s | sed -n 's#.*\(\[0x[0-9a-fx,]\{1,\}\]\)#\1#p' | llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -disassemble -show-encoding -comment-column=0 | FileCheck --strict-whitespace --check-prefixes=GFX12,GFX12-DIS %s
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -show-encoding %s | %extract-encodings | llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize32,+real-true16 -disassemble -show-encoding -comment-column=0 | FileCheck --strict-whitespace --check-prefixes=GFX12,GFX12-DIS %s
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -show-encoding -comment-column=0 %s | FileCheck --strict-whitespace --check-prefixes=GFX12,GFX12-ASM %s
-// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -show-encoding %s | sed -n 's#.*\(\[0x[0-9a-fx,]\{1,\}\]\)#\1#p' | llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -disassemble -show-encoding -comment-column=0 | FileCheck --strict-whitespace --check-prefixes=GFX12,GFX12-DIS %s
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -show-encoding %s | %extract-encodings | llvm-mc -triple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64,+real-true16 -disassemble -show-encoding -comment-column=0 | FileCheck --strict-whitespace --check-prefixes=GFX12,GFX12-DIS %s
v_bfrev_b32_e32 v5, v1
// GFX12: v_bfrev_b32_e32 v5, v1 ; encoding: [0x01,0x71,0x0a,0x7e]
diff --git a/llvm/test/Transforms/FixIrreducible/bug45623.ll b/llvm/test/Transforms/FixIrreducible/bug45623.ll
index 5872443..b6dd6fb 100644
--- a/llvm/test/Transforms/FixIrreducible/bug45623.ll
+++ b/llvm/test/Transforms/FixIrreducible/bug45623.ll
@@ -90,3 +90,112 @@ for.end626: ; preds = %for.cond616
if.else629: ; preds = %backtrack
br label %retry
}
+
+define void @tre_tnfa_run_backtrack_callbr(i1 %arg) {
+; CHECK-LABEL: @tre_tnfa_run_backtrack_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[RETRY:%.*]] []
+; CHECK: retry:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[ARG:%.*]])
+; CHECK-NEXT: to label [[RETRY_TARGET_BACKTRACK:%.*]] [label %retry.target.while.body248]
+; CHECK: while.body248:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[ARG]])
+; CHECK-NEXT: to label [[IF_THEN250:%.*]] [label %if.end275]
+; CHECK: if.then250:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[FOR_COND264:%.*]] []
+; CHECK: for.cond264:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[ARG]])
+; CHECK-NEXT: to label [[FOR_BODY267:%.*]] [label %backtrack]
+; CHECK: for.body267:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[FOR_COND264]] []
+; CHECK: if.end275:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[FOR_COND342:%.*]] []
+; CHECK: for.cond342:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[ARG]])
+; CHECK-NEXT: to label [[FOR_BODY345:%.*]] [label %for.end580]
+; CHECK: for.body345:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[FOR_COND342]] []
+; CHECK: for.end580:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[BACKTRACK:%.*]] []
+; CHECK: backtrack:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[ARG]])
+; CHECK-NEXT: to label [[IF_THEN595:%.*]] [label %if.else629]
+; CHECK: if.then595:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[FOR_COND616:%.*]] []
+; CHECK: for.cond616:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[ARG]])
+; CHECK-NEXT: to label [[FOR_BODY619:%.*]] [label %for.end626]
+; CHECK: for.body619:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[FOR_COND616]] []
+; CHECK: for.end626:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[FOR_END626_TARGET_WHILE_BODY248:%.*]] []
+; CHECK: if.else629:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[RETRY]] []
+; CHECK: for.end626.target.while.body248:
+; CHECK-NEXT: br label [[IRR_GUARD:%.*]]
+; CHECK: retry.target.backtrack:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: retry.target.while.body248:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: irr.guard:
+; CHECK-NEXT: [[GUARD_WHILE_BODY248:%.*]] = phi i1 [ true, [[FOR_END626_TARGET_WHILE_BODY248]] ], [ false, [[RETRY_TARGET_BACKTRACK]] ], [ true, [[RETRY_TARGET_WHILE_BODY248:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_WHILE_BODY248]], label [[WHILE_BODY248:%.*]], label [[BACKTRACK]]
+;
+entry:
+ callbr void asm "", ""() to label %retry []
+
+retry:
+ callbr void asm "", "r,!i"(i1 %arg) to label %backtrack [label %while.body248]
+
+while.body248: ; preds = %for.end626, %retry
+ callbr void asm "", "r,!i"(i1 %arg) to label %if.then250 [label %if.end275]
+
+if.then250: ; preds = %while.body248
+ callbr void asm "", ""() to label %for.cond264 []
+
+for.cond264: ; preds = %for.body267, %if.then250
+ callbr void asm "", "r,!i"(i1 %arg) to label %for.body267 [label %backtrack]
+
+for.body267: ; preds = %for.cond264
+ callbr void asm "", ""() to label %for.cond264 []
+
+if.end275: ; preds = %while.body248
+ callbr void asm "", ""() to label %for.cond342 []
+
+for.cond342: ; preds = %for.body345, %if.end275
+ callbr void asm "", "r,!i"(i1 %arg) to label %for.body345 [label %for.end580]
+
+for.body345: ; preds = %for.cond342
+ callbr void asm "", ""() to label %for.cond342 []
+
+for.end580: ; preds = %for.cond342
+ callbr void asm "", ""() to label %backtrack []
+
+backtrack: ; preds = %for.end580, %for.cond264, %retry
+ callbr void asm "", "r,!i"(i1 %arg) to label %if.then595 [label %if.else629]
+
+if.then595: ; preds = %backtrack
+ callbr void asm "", ""() to label %for.cond616 []
+
+for.cond616: ; preds = %for.body619, %if.then595
+ callbr void asm "", "r,!i"(i1 %arg) to label %for.body619 [label %for.end626]
+
+for.body619: ; preds = %for.cond616
+ callbr void asm "", ""() to label %for.cond616 []
+
+for.end626: ; preds = %for.cond616
+ callbr void asm "", ""() to label %while.body248 []
+
+if.else629: ; preds = %backtrack
+ callbr void asm "", ""() to label %retry []
+}
diff --git a/llvm/test/Transforms/FixIrreducible/callbr.ll b/llvm/test/Transforms/FixIrreducible/callbr.ll
new file mode 100644
index 0000000..26ca6c7
--- /dev/null
+++ b/llvm/test/Transforms/FixIrreducible/callbr.ll
@@ -0,0 +1,869 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes='fix-irreducible,verify<loops>' -S | FileCheck %s
+; RUN: opt < %s -passes='verify<loops>,fix-irreducible,verify<loops>' -S | FileCheck %s
+; RUN: opt < %s -passes='print<cycles>' -disable-output 2>&1 | FileCheck %s --check-prefix CYCLES-BEFORE
+; RUN: opt < %s -passes='fix-irreducible,print<cycles>' -disable-output 2>&1 | FileCheck %s --check-prefix CYCLES-AFTER
+
+; CYCLES-BEFORE: CycleInfo for function: callbr_entry
+; CYCLES-BEFORE-NEXT: depth=1: entries(indirect fallthrough)
+; CYCLES-AFTER: CycleInfo for function: callbr_entry
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) indirect fallthrough
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_entry_targets_with_phi_nodes
+; CYCLES-BEFORE-NEXT: depth=1: entries(block1 block)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_entry_targets_with_phi_nodes
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) block1 block
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_entry_multiple_indirect_targets
+; CYCLES-BEFORE-NEXT: depth=1: entries(indirect fallthrough)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_entry_multiple_indirect_targets
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) indirect fallthrough
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_entry_multiple_indirect_targets1
+; CYCLES-BEFORE-NEXT: depth=1: entries(indirect1 indirect fallthrough)
+; CYCLES-BEFORE-NEXT: depth=2: entries(indirect fallthrough)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_entry_multiple_indirect_targets1
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) indirect1 indirect fallthrough irr.guard1 irr.guard2
+; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard2) indirect fallthrough
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_header_no_indirect
+; CYCLES-BEFORE-NEXT: depth=1: entries(fallthrough callbr)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_header_no_indirect
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) fallthrough callbr callbr.target.fallthrough
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_header
+; CYCLES-BEFORE-NEXT: depth=1: entries(fallthrough callbr)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_header
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) fallthrough callbr callbr.target.fallthrough
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_header_multiple_indirect_targets
+; CYCLES-BEFORE-NEXT: depth=1: entries(fallthrough callbr) indirect1
+; CYCLES-BEFORE-NEXT: depth=2: entries(callbr) indirect1
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_header_multiple_indirect_targets
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) fallthrough callbr indirect1 callbr.target.fallthrough
+; CYCLES-AFTER-NEXT: depth=2: entries(callbr) indirect1
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_regular
+; CYCLES-BEFORE-NEXT: depth=1: entries(fallthrough2 fallthrough1)
+; CYCLES-BEFORE-NEXT: depth=1: entries(indirect2 indirect1)
+; CYCLES-BEFORE-NEXT: depth=1: entries(nocallbr2 nocallbr1)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_regular
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) fallthrough2 fallthrough1
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard1) indirect2 indirect1
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard2) nocallbr2 nocallbr1
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_regular1
+; CYCLES-BEFORE-NEXT: depth=1: entries(callbr nocallbr)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_regular1
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) callbr nocallbr
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_regular2
+; CYCLES-BEFORE-NEXT: depth=1: entries(callbr nocallbr)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_regular2
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) callbr nocallbr
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_header_and_regular
+; CYCLES-BEFORE-NEXT: depth=1: entries(callbr_header) callbr_regular mid
+; CYCLES-BEFORE-NEXT: depth=2: entries(callbr_regular mid)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_header_and_regular
+; CYCLES-AFTER-NEXT: depth=1: entries(callbr_header) callbr_regular mid callbr_header.target.mid callbr_header.target.callbr_regular irr.guard
+; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard) callbr_regular mid
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_only
+; CYCLES-BEFORE-NEXT: depth=1: entries(callbr_block callbr_header)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_only
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) callbr_block callbr_header callbr_header.target.callbr_block
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: entry_multiple_callbr
+; CYCLES-BEFORE-NEXT: depth=1: entries(cb2 block block1)
+; CYCLES-BEFORE-NEXT: depth=2: entries(block block1)
+; CYCLES-AFTER-NEXT: CycleInfo for function: entry_multiple_callbr
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) cb2 block block1 irr.guard1 cb2.target.block1 cb2.target.block irr.guard2
+; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard2) block block1
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_exit_with_separate_entries
+; CYCLES-BEFORE-NEXT: depth=1: entries(l2 l1) cb
+; CYCLES-BEFORE-NEXT: depth=2: entries(l1 cb)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_exit_with_separate_entries
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) l2 l1 cb cb.target.l1 irr.guard1
+; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard1) l1 cb cb.target.l1
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_exit_with_separate_entries1
+; CYCLES-BEFORE-NEXT: depth=1: entries(loop2 loop1) cb
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_exit_with_separate_entries1
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) loop2 loop1 cb cb.target.loop2
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_only_multiple
+; CYCLES-BEFORE-NEXT: depth=1: entries(cb3 cb1 cb2)
+; CYCLES-BEFORE-NEXT: depth=2: entries(cb1 cb2)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_only_multiple
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) cb3 cb1 cb2 cb2.target.cb3 cb1.target.cb3 irr.guard1 cb2.target.cb1 cb3.target.cb1 irr.guard2
+; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard2) cb1 cb2 cb2.target.cb1
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_bypass
+; CYCLES-BEFORE-NEXT: depth=1: entries(l1 cb) l2
+; CYCLES-BEFORE-NEXT: depth=2: entries(cb l2)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_bypass
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) l1 cb l2 cb.target.l1 irr.guard1
+; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard1) cb l2
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_multiple_with_exit
+; CYCLES-BEFORE-NEXT: depth=1: entries(l3 l1 l2)
+; CYCLES-BEFORE-NEXT: depth=2: entries(l1 l2)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_multiple_with_exit
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) l3 l1 l2 irr.guard1 irr.guard2
+; CYCLES-AFTER-NEXT: depth=2: entries(irr.guard2) l1 l2
+
+; CYCLES-BEFORE-NEXT: CycleInfo for function: callbr_nested
+; CYCLES-BEFORE-NEXT: depth=1: entries(bb bh)
+; CYCLES-BEFORE-NEXT: depth=1: entries(b h)
+; CYCLES-AFTER-NEXT: CycleInfo for function: callbr_nested
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard) bb bh
+; CYCLES-AFTER-NEXT: depth=1: entries(irr.guard1) b h
+
+; Fix the irreducible loop in which callbr is the entry (see description at the
+; top of FixIrreducible.cpp).
+define void @callbr_entry(i1 %c) {
+; CHECK-LABEL: define void @callbr_entry(
+; CHECK-SAME: i1 [[C:%.*]]) {
+; CHECK-NEXT: [[CALLBR:.*:]]
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[CALLBR_TARGET_FALLTHROUGH:.*]] [label %callbr.target.indirect]
+; CHECK: [[FALLTHROUGH:.*]]:
+; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD:.*]], label %[[RET:.*]]
+; CHECK: [[INDIRECT:.*]]:
+; CHECK-NEXT: br label %[[FALLTHROUGH]]
+; CHECK: [[RET]]:
+; CHECK-NEXT: ret void
+; CHECK: [[CALLBR_TARGET_FALLTHROUGH]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[CALLBR_TARGET_INDIRECT:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_INDIRECT:%.*]] = phi i1 [ true, %[[FALLTHROUGH]] ], [ false, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ true, %[[CALLBR_TARGET_INDIRECT]] ]
+; CHECK-NEXT: br i1 [[GUARD_INDIRECT]], label %[[INDIRECT]], label %[[FALLTHROUGH]]
+;
+callbr:
+ callbr void asm "", "!i"() to label %fallthrough [label %indirect]
+fallthrough:
+ br i1 %c, label %indirect, label %ret
+indirect:
+ br label %fallthrough
+ret:
+ ret void
+}
+
+define i32 @callbr_entry_targets_with_phi_nodes(i1 %c) {
+; CHECK-LABEL: define i32 @callbr_entry_targets_with_phi_nodes(
+; CHECK-SAME: i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[ENTRY_TARGET_BLOCK:.*]] [label %entry.target.block1]
+; CHECK: [[BLOCK:.*]]:
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ 1, %[[BLOCK1:.*]] ], [ [[A_MOVED:%.*]], %[[IRR_GUARD:.*]] ]
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[BLOCK1]]:
+; CHECK-NEXT: br i1 [[C]], label %[[BLOCK]], label %[[RET:.*]]
+; CHECK: [[RET]]:
+; CHECK-NEXT: ret i32 [[B_MOVED:%.*]]
+; CHECK: [[ENTRY_TARGET_BLOCK]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[ENTRY_TARGET_BLOCK1:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[A_MOVED]] = phi i32 [ poison, %[[BLOCK]] ], [ 42, %[[ENTRY_TARGET_BLOCK]] ], [ poison, %[[ENTRY_TARGET_BLOCK1]] ]
+; CHECK-NEXT: [[B_MOVED]] = phi i32 [ [[A]], %[[BLOCK]] ], [ poison, %[[ENTRY_TARGET_BLOCK]] ], [ 43, %[[ENTRY_TARGET_BLOCK1]] ]
+; CHECK-NEXT: [[GUARD_BLOCK1:%.*]] = phi i1 [ true, %[[BLOCK]] ], [ false, %[[ENTRY_TARGET_BLOCK]] ], [ true, %[[ENTRY_TARGET_BLOCK1]] ]
+; CHECK-NEXT: br i1 [[GUARD_BLOCK1]], label %[[BLOCK1]], label %[[BLOCK]]
+;
+entry:
+ callbr void asm "", "!i"() to label %block [label %block1]
+block:
+ %a = phi i32 [42, %entry], [1, %block1]
+ br label %block1
+block1:
+ %b = phi i32 [43, %entry], [%a, %block]
+ br i1 %c, label %block, label %ret
+ret:
+ ret i32 %b
+}
+
+define void @callbr_entry_multiple_indirect_targets(i1 %c) {
+; CHECK-LABEL: define void @callbr_entry_multiple_indirect_targets(
+; CHECK-SAME: i1 [[C:%.*]]) {
+; CHECK-NEXT: [[CALLBR:.*:]]
+; CHECK-NEXT: callbr void asm "", "!i,!i,!i"()
+; CHECK-NEXT: to label %[[CALLBR_TARGET_FALLTHROUGH:.*]] [label %[[CALLBR_TARGET_INDIRECT:.*]], label %[[INDIRECT1:.*]], label %indirect2]
+; CHECK: [[INDIRECT3:.*]]:
+; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD:.*]], label %[[RET:.*]]
+; CHECK: [[INDIRECT:.*]]:
+; CHECK-NEXT: br label %[[INDIRECT3]]
+; CHECK: [[INDIRECT1]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[INDIRECT2:.*:]]
+; CHECK-NEXT: br label %[[RET]]
+; CHECK: [[RET]]:
+; CHECK-NEXT: ret void
+; CHECK: [[CALLBR_TARGET_FALLTHROUGH]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[CALLBR_TARGET_INDIRECT]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_INDIRECT:%.*]] = phi i1 [ true, %[[INDIRECT3]] ], [ true, %[[INDIRECT1]] ], [ false, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ true, %[[CALLBR_TARGET_INDIRECT]] ]
+; CHECK-NEXT: br i1 [[GUARD_INDIRECT]], label %[[INDIRECT]], label %[[INDIRECT3]]
+;
+callbr:
+ callbr void asm "", "!i,!i,!i"() to label %fallthrough [label %indirect, label %indirect1, label %indirect2]
+fallthrough:
+ br i1 %c, label %indirect, label %ret
+indirect:
+ br label %fallthrough
+indirect1:
+ br label %indirect
+indirect2:
+ br label %ret
+ret:
+ ret void
+}
+
+define void @callbr_entry_multiple_indirect_targets1(i1 %c, i1 %d) {
+; CHECK-LABEL: define void @callbr_entry_multiple_indirect_targets1(
+; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]]) {
+; CHECK-NEXT: [[CALLBR:.*:]]
+; CHECK-NEXT: callbr void asm "", "!i,!i,!i"()
+; CHECK-NEXT: to label %[[CALLBR_TARGET_FALLTHROUGH:.*]] [label %[[CALLBR_TARGET_INDIRECT:.*]], label %[[CALLBR_TARGET_INDIRECT1:.*]], label %indirect2]
+; CHECK: [[INDIRECT3:.*]]:
+; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD2:.*]], label %[[RET:.*]]
+; CHECK: [[INDIRECT:.*]]:
+; CHECK-NEXT: br i1 [[D]], label %[[INDIRECT3]], label %[[IRR_GUARD:.*]]
+; CHECK: [[INDIRECT1:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD2]]
+; CHECK: [[INDIRECT2:.*:]]
+; CHECK-NEXT: br label %[[RET]]
+; CHECK: [[RET]]:
+; CHECK-NEXT: ret void
+; CHECK: [[CALLBR_TARGET_FALLTHROUGH]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[CALLBR_TARGET_INDIRECT]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[CALLBR_TARGET_INDIRECT1]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_INDIRECT1:%.*]] = phi i1 [ true, %[[INDIRECT]] ], [ false, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ false, %[[CALLBR_TARGET_INDIRECT]] ], [ true, %[[CALLBR_TARGET_INDIRECT1]] ]
+; CHECK-NEXT: [[GUARD_FALLTHROUGH:%.*]] = phi i1 [ false, %[[INDIRECT]] ], [ true, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ false, %[[CALLBR_TARGET_INDIRECT]] ], [ false, %[[CALLBR_TARGET_INDIRECT1]] ]
+; CHECK-NEXT: [[GUARD_FALLTHROUGH_INV:%.*]] = xor i1 [[GUARD_FALLTHROUGH]], true
+; CHECK-NEXT: br i1 [[GUARD_INDIRECT1]], label %[[INDIRECT1]], label %[[IRR_GUARD1:.*]]
+; CHECK: [[IRR_GUARD1]]:
+; CHECK-NEXT: br label %[[IRR_GUARD2]]
+; CHECK: [[IRR_GUARD2]]:
+; CHECK-NEXT: [[GUARD_INDIRECT:%.*]] = phi i1 [ true, %[[INDIRECT3]] ], [ [[GUARD_FALLTHROUGH_INV]], %[[IRR_GUARD1]] ], [ true, %[[INDIRECT1]] ]
+; CHECK-NEXT: br i1 [[GUARD_INDIRECT]], label %[[INDIRECT]], label %[[INDIRECT3]]
+;
+callbr:
+ callbr void asm "", "!i,!i,!i"() to label %fallthrough [label %indirect, label %indirect1, label %indirect2]
+fallthrough:
+ br i1 %c, label %indirect, label %ret
+indirect:
+ br i1 %d, label %fallthrough, label %indirect1
+indirect1:
+ br label %indirect
+indirect2:
+ br label %ret
+ret:
+ ret void
+}
+
+; Fix the irreducible loop in which callbr is the header (see the example at the
+; top of FixIrreducible.cpp).
+define void @callbr_header_no_indirect(i1 %c, i1 %d) {
+; CHECK-LABEL: define void @callbr_header_no_indirect(
+; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]]) {
+; CHECK-NEXT: [[D_INV:%.*]] = xor i1 [[D]], true
+; CHECK-NEXT: br label %[[IRR_GUARD:.*]]
+; CHECK: [[CALLBR:.*]]:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label %[[CALLBR_TARGET_FALLTHROUGH:.*]] []
+; CHECK: [[FALLTHROUGH:.*]]:
+; CHECK-NEXT: br i1 [[C]], label %[[CALLBR]], label %[[RET:.*]]
+; CHECK: [[RET]]:
+; CHECK-NEXT: ret void
+; CHECK: [[CALLBR_TARGET_FALLTHROUGH]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_FALLTHROUGH:%.*]] = phi i1 [ true, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ [[D_INV]], [[TMP0:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_FALLTHROUGH]], label %[[FALLTHROUGH]], label %[[CALLBR]]
+;
+ br i1 %d, label %callbr, label %fallthrough
+callbr:
+ callbr void asm "", ""() to label %fallthrough []
+fallthrough:
+ br i1 %c, label %callbr, label %ret
+ret:
+ ret void
+}
+
+; Fix the irreducible loop in which callbr is the header.
+define void @callbr_header(i1 %c, i1 %d) {
+; CHECK-LABEL: define void @callbr_header(
+; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]]) {
+; CHECK-NEXT: [[D_INV:%.*]] = xor i1 [[D]], true
+; CHECK-NEXT: br label %[[IRR_GUARD:.*]]
+; CHECK: [[CALLBR:.*]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[CALLBR_TARGET_FALLTHROUGH:.*]] [label %indirect]
+; CHECK: [[INDIRECT:.*:]]
+; CHECK-NEXT: br label %[[RET:.*]]
+; CHECK: [[FALLTHROUGH:.*]]:
+; CHECK-NEXT: br i1 [[C]], label %[[CALLBR]], label %[[RET]]
+; CHECK: [[RET]]:
+; CHECK-NEXT: ret void
+; CHECK: [[CALLBR_TARGET_FALLTHROUGH]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_FALLTHROUGH:%.*]] = phi i1 [ true, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ [[D_INV]], [[TMP0:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_FALLTHROUGH]], label %[[FALLTHROUGH]], label %[[CALLBR]]
+;
+ br i1 %d, label %callbr, label %fallthrough
+callbr:
+ callbr void asm "", "!i"() to label %fallthrough [label %indirect]
+indirect:
+ br label %ret
+fallthrough:
+ br i1 %c, label %callbr, label %ret
+ret:
+ ret void
+}
+
+define void @callbr_header_multiple_indirect_targets(i1 %c, i1 %d) {
+; CHECK-LABEL: define void @callbr_header_multiple_indirect_targets(
+; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]]) {
+; CHECK-NEXT: [[D_INV:%.*]] = xor i1 [[D]], true
+; CHECK-NEXT: br label %[[IRR_GUARD:.*]]
+; CHECK: [[CALLBR:.*]]:
+; CHECK-NEXT: callbr void asm "", "!i,!i"()
+; CHECK-NEXT: to label %[[CALLBR_TARGET_FALLTHROUGH:.*]] [label %[[INDIRECT1:.*]], label %indirect1]
+; CHECK: [[INDIRECT1]]:
+; CHECK-NEXT: br label %[[RET:.*]]
+; CHECK: [[INDIRECT2:.*:]]
+; CHECK-NEXT: br label %[[CALLBR]]
+; CHECK: [[FALLTHROUGH:.*]]:
+; CHECK-NEXT: br i1 [[C]], label %[[CALLBR]], label %[[RET]]
+; CHECK: [[RET]]:
+; CHECK-NEXT: ret void
+; CHECK: [[CALLBR_TARGET_FALLTHROUGH]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_FALLTHROUGH:%.*]] = phi i1 [ true, %[[CALLBR_TARGET_FALLTHROUGH]] ], [ [[D_INV]], [[TMP0:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_FALLTHROUGH]], label %[[FALLTHROUGH]], label %[[CALLBR]]
+;
+ br i1 %d, label %callbr, label %fallthrough
+callbr:
+ callbr void asm "", "!i,!i"() to label %fallthrough [label %indirect, label %indirect1]
+indirect:
+ br label %ret
+indirect1:
+ br label %callbr
+fallthrough:
+ br i1 %c, label %callbr, label %ret
+ret:
+ ret void
+}
+
+; Fix the three usual irreducible loops (callbr isn't a part of one of them):
+; - fallthrough, fallthrough1, fallthrough2
+; - indirect, indirect1, indirect2
+; - nocallbr, nocallbr1, nocallbr2
+define void @callbr_regular(i1 %c, i1 %d) {
+; CHECK-LABEL: define void @callbr_regular(
+; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]]) {
+; CHECK-NEXT: [[C_INV:%.*]] = xor i1 [[C]], true
+; CHECK-NEXT: br i1 [[D]], label %[[CALLBR:.*]], label %[[NOCALLBR:.*]]
+; CHECK: [[CALLBR]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[FALLTHROUGH:.*]] [label %indirect]
+; CHECK: [[FALLTHROUGH]]:
+; CHECK-NEXT: br label %[[IRR_GUARD:.*]]
+; CHECK: [[FALLTHROUGH1:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[FALLTHROUGH2:.*]]:
+; CHECK-NEXT: br i1 [[D]], label %[[FALLTHROUGH1]], label %[[RET:.*]]
+; CHECK: [[INDIRECT:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD1:.*]]
+; CHECK: [[INDIRECT1:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD1]]
+; CHECK: [[INDIRECT2:.*]]:
+; CHECK-NEXT: br i1 [[D]], label %[[INDIRECT1]], label %[[RET]]
+; CHECK: [[NOCALLBR]]:
+; CHECK-NEXT: br label %[[IRR_GUARD2:.*]]
+; CHECK: [[NOCALLBR1:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD2]]
+; CHECK: [[NOCALLBR2:.*]]:
+; CHECK-NEXT: br i1 [[D]], label %[[NOCALLBR1]], label %[[RET]]
+; CHECK: [[RET]]:
+; CHECK-NEXT: ret void
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_FALLTHROUGH2:%.*]] = phi i1 [ true, %[[FALLTHROUGH1]] ], [ [[C_INV]], %[[FALLTHROUGH]] ]
+; CHECK-NEXT: br i1 [[GUARD_FALLTHROUGH2]], label %[[FALLTHROUGH2]], label %[[FALLTHROUGH1]]
+; CHECK: [[IRR_GUARD1]]:
+; CHECK-NEXT: [[GUARD_INDIRECT2:%.*]] = phi i1 [ true, %[[INDIRECT1]] ], [ [[C_INV]], %[[INDIRECT]] ]
+; CHECK-NEXT: br i1 [[GUARD_INDIRECT2]], label %[[INDIRECT2]], label %[[INDIRECT1]]
+; CHECK: [[IRR_GUARD2]]:
+; CHECK-NEXT: [[GUARD_NOCALLBR2:%.*]] = phi i1 [ true, %[[NOCALLBR1]] ], [ [[C_INV]], %[[NOCALLBR]] ]
+; CHECK-NEXT: br i1 [[GUARD_NOCALLBR2]], label %[[NOCALLBR2]], label %[[NOCALLBR1]]
+;
+ br i1 %d, label %callbr, label %nocallbr
+callbr:
+ callbr void asm "", "!i"() to label %fallthrough [label %indirect]
+fallthrough:
+ br i1 %c, label %fallthrough1, label %fallthrough2
+fallthrough1:
+ br label %fallthrough2
+fallthrough2:
+ br i1 %d, label %fallthrough1, label %ret
+indirect:
+ br i1 %c, label %indirect1, label %indirect2
+indirect1:
+ br label %indirect2
+indirect2:
+ br i1 %d, label %indirect1, label %ret
+nocallbr:
+ br i1 %c, label %nocallbr1, label %nocallbr2
+nocallbr1:
+ br label %nocallbr2
+nocallbr2:
+ br i1 %d, label %nocallbr1, label %ret
+ret:
+ ret void
+}
+
+; Fix an irreducible loop in which callbr is a regular block (neither entry nor
+; header). See the example at the top of FixIrreducible.cpp.
+define void @callbr_regular1(i1 %c) {
+; CHECK-LABEL: define void @callbr_regular1(
+; CHECK-SAME: i1 [[C:%.*]]) {
+; CHECK-NEXT: [[C_INV:%.*]] = xor i1 [[C]], true
+; CHECK-NEXT: br label %[[IRR_GUARD:.*]]
+; CHECK: [[NOCALLBR:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[CALLBR:.*]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[RET:.*]] [label %nocallbr]
+; CHECK: [[RET]]:
+; CHECK-NEXT: ret void
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_CALLBR:%.*]] = phi i1 [ true, %[[NOCALLBR]] ], [ [[C_INV]], [[TMP0:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_CALLBR]], label %[[CALLBR]], label %[[NOCALLBR]]
+;
+ br i1 %c, label %nocallbr, label %callbr
+nocallbr:
+ br label %callbr
+callbr:
+ callbr void asm "", "!i"() to label %ret [label %nocallbr]
+ret:
+ ret void
+}
+
+; Fix an irreducible loop in which callbr is a regular block (neither entry nor
+; header). See the example at the top of FixIrreducible.cpp.
+define void @callbr_regular2(i1 %c) {
+; CHECK-LABEL: define void @callbr_regular2(
+; CHECK-SAME: i1 [[C:%.*]]) {
+; CHECK-NEXT: [[C_INV:%.*]] = xor i1 [[C]], true
+; CHECK-NEXT: br label %[[IRR_GUARD:.*]]
+; CHECK: [[NOCALLBR:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[CALLBR:.*]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[NOCALLBR]] [label %ret]
+; CHECK: [[RET:.*:]]
+; CHECK-NEXT: ret void
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_CALLBR:%.*]] = phi i1 [ true, %[[NOCALLBR]] ], [ [[C_INV]], [[TMP0:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_CALLBR]], label %[[CALLBR]], label %[[NOCALLBR]]
+;
+ br i1 %c, label %nocallbr, label %callbr
+nocallbr:
+ br label %callbr
+callbr:
+ callbr void asm "", "!i"() to label %nocallbr [label %ret]
+ret:
+ ret void
+}
+
+; Fix an irreducible loop with two callbr blocks, one as header and one as regular block.
+define void @callbr_header_and_regular(i1 %c) {
+; CHECK-LABEL: define void @callbr_header_and_regular(
+; CHECK-SAME: i1 [[C:%.*]]) {
+; CHECK-NEXT: br label %[[CALLBR_HEADER:.*]]
+; CHECK: [[CALLBR_HEADER]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[CALLBR_HEADER_TARGET_MID:.*]] [label %callbr_header.target.callbr_regular]
+; CHECK: [[MID:.*]]:
+; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD:.*]], label %[[RET:.*]]
+; CHECK: [[CALLBR_REGULAR:.*]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[CALLBR_HEADER]] [label %mid]
+; CHECK: [[RET]]:
+; CHECK-NEXT: ret void
+; CHECK: [[CALLBR_HEADER_TARGET_MID]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[CALLBR_HEADER_TARGET_CALLBR_REGULAR:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_CALLBR_REGULAR:%.*]] = phi i1 [ true, %[[MID]] ], [ false, %[[CALLBR_HEADER_TARGET_MID]] ], [ true, %[[CALLBR_HEADER_TARGET_CALLBR_REGULAR]] ]
+; CHECK-NEXT: br i1 [[GUARD_CALLBR_REGULAR]], label %[[CALLBR_REGULAR]], label %[[MID]]
+;
+ br label %callbr_header
+callbr_header:
+ callbr void asm "", "!i"() to label %mid [label %callbr_regular]
+mid:
+ br i1 %c, label %callbr_regular, label %ret
+callbr_regular:
+ callbr void asm "", "!i"() to label %callbr_header [label %mid]
+ret:
+ ret void
+}
+
+; Fix an irreducible loop consisting only of callbr blocks (and ret). See the
+; example at the top of FixIrreducible.cpp.
+define void @callbr_only(i1 %c) {
+; CHECK-LABEL: define void @callbr_only(
+; CHECK-SAME: i1 [[C:%.*]]) {
+; CHECK-NEXT: [[CALLBR:.*:]]
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[CALLBR_ENTRY_TARGET_CALLBR_HEADER:.*]] [label %callbr_entry.target.callbr_block]
+; CHECK: [[CALLBR_HEADER:.*]]:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label %[[CALLBR_HEADER_TARGET_CALLBR_BLOCK:.*]] []
+; CHECK: [[CALLBR_BLOCK:.*]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[CALLBR_HEADER]] [label %ret]
+; CHECK: [[RET:.*:]]
+; CHECK-NEXT: ret void
+; CHECK: [[CALLBR_HEADER_TARGET_CALLBR_BLOCK]]:
+; CHECK-NEXT: br label %[[IRR_GUARD:.*]]
+; CHECK: [[CALLBR_ENTRY_TARGET_CALLBR_HEADER]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[CALLBR_ENTRY_TARGET_CALLBR_BLOCK:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_CALLBR_BLOCK:%.*]] = phi i1 [ true, %[[CALLBR_HEADER_TARGET_CALLBR_BLOCK]] ], [ false, %[[CALLBR_ENTRY_TARGET_CALLBR_HEADER]] ], [ true, %[[CALLBR_ENTRY_TARGET_CALLBR_BLOCK]] ]
+; CHECK-NEXT: br i1 [[GUARD_CALLBR_BLOCK]], label %[[CALLBR_BLOCK]], label %[[CALLBR_HEADER]]
+;
+callbr_entry:
+ callbr void asm "", "!i"() to label %callbr_header [label %callbr_block]
+callbr_header:
+ callbr void asm "", ""() to label %callbr_block []
+callbr_block:
+ callbr void asm "", "!i"() to label %callbr_header [label %ret]
+ret:
+ ret void
+}
+
+; Irreducible loop: entry leading to multiple callbr blocks.
+define void @entry_multiple_callbr(i1 %a, i1 %b, i1 %c) {
+; CHECK-LABEL: define void @entry_multiple_callbr(
+; CHECK-SAME: i1 [[A:%.*]], i1 [[B:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br i1 [[A]], label %[[CB1:.*]], label %[[IRR_GUARD:.*]]
+; CHECK: [[CB1]]:
+; CHECK-NEXT: callbr void asm "", "!i,!i"()
+; CHECK-NEXT: to label %[[CB1_TARGET_BLOCK:.*]] [label %[[CB1_TARGET_CB2:.*]], label %cb1.target.block1]
+; CHECK: [[BLOCK:.*]]:
+; CHECK-NEXT: br i1 [[B]], label %[[IRR_GUARD]], label %[[BLOCK1:.*]]
+; CHECK: [[CB2:.*]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[CB2_TARGET_BLOCK1:.*]] [label %cb2.target.block]
+; CHECK: [[BLOCK1]]:
+; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD2:.*]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+; CHECK: [[CB1_TARGET_BLOCK]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[CB1_TARGET_CB2]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[CB1_TARGET_BLOCK1:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_CB2:%.*]] = phi i1 [ true, %[[BLOCK]] ], [ false, %[[CB1_TARGET_BLOCK]] ], [ true, %[[CB1_TARGET_CB2]] ], [ false, %[[CB1_TARGET_BLOCK1]] ], [ true, %[[ENTRY]] ]
+; CHECK-NEXT: [[GUARD_BLOCK:%.*]] = phi i1 [ false, %[[BLOCK]] ], [ true, %[[CB1_TARGET_BLOCK]] ], [ false, %[[CB1_TARGET_CB2]] ], [ false, %[[CB1_TARGET_BLOCK1]] ], [ false, %[[ENTRY]] ]
+; CHECK-NEXT: br i1 [[GUARD_CB2]], label %[[CB2]], label %[[IRR_GUARD1:.*]]
+; CHECK: [[IRR_GUARD1]]:
+; CHECK-NEXT: br label %[[IRR_GUARD2]]
+; CHECK: [[CB2_TARGET_BLOCK1]]:
+; CHECK-NEXT: br label %[[IRR_GUARD2]]
+; CHECK: [[CB2_TARGET_BLOCK:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD2]]
+; CHECK: [[IRR_GUARD2]]:
+; CHECK-NEXT: [[GUARD_BLOCK3:%.*]] = phi i1 [ true, %[[BLOCK1]] ], [ [[GUARD_BLOCK]], %[[IRR_GUARD1]] ], [ false, %[[CB2_TARGET_BLOCK1]] ], [ true, %[[CB2_TARGET_BLOCK]] ]
+; CHECK-NEXT: br i1 [[GUARD_BLOCK3]], label %[[BLOCK]], label %[[BLOCK1]]
+;
+entry:
+ br i1 %a, label %cb1, label %cb2
+cb1:
+ callbr void asm "", "!i,!i"() to label %block [label %cb2, label %block1]
+block:
+ br i1 %b, label %cb2, label %block1
+cb2:
+ callbr void asm "", "!i"() to label %block1 [label %block]
+block1:
+ br i1 %c, label %block, label %exit
+exit:
+ ret void
+}
+
+; Irreducible loop: callbr as loop exit, with multiple entries
+define void @callbr_exit_with_separate_entries(i1 %a, i1 %b, i1 %c) {
+; CHECK-LABEL: define void @callbr_exit_with_separate_entries(
+; CHECK-SAME: i1 [[A:%.*]], i1 [[B:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[C_INV:%.*]] = xor i1 [[C]], true
+; CHECK-NEXT: [[A_INV:%.*]] = xor i1 [[A]], true
+; CHECK-NEXT: br label %[[IRR_GUARD:.*]]
+; CHECK: [[L1:.*]]:
+; CHECK-NEXT: br i1 [[B]], label %[[CB:.*]], label %[[IRR_GUARD]]
+; CHECK: [[L2:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD1:.*]]
+; CHECK: [[CB]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[EXIT:.*]] [label %cb.target.l1]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_L2:%.*]] = phi i1 [ true, %[[L1]] ], [ [[A_INV]], %[[ENTRY]] ]
+; CHECK-NEXT: br i1 [[GUARD_L2]], label %[[L2]], label %[[IRR_GUARD1]]
+; CHECK: [[CB_TARGET_L1:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD1]]
+; CHECK: [[IRR_GUARD1]]:
+; CHECK-NEXT: [[GUARD_L1:%.*]] = phi i1 [ true, %[[CB_TARGET_L1]] ], [ true, %[[IRR_GUARD]] ], [ [[C_INV]], %[[L2]] ]
+; CHECK-NEXT: br i1 [[GUARD_L1]], label %[[L1]], label %[[CB]]
+;
+entry:
+ br i1 %a, label %l1, label %l2
+l1:
+ br i1 %b, label %cb, label %l2
+l2:
+ br i1 %c, label %cb, label %l1
+cb:
+ callbr void asm "", "!i"() to label %exit [label %l1]
+exit:
+ ret void
+}
+
+define void @callbr_exit_with_separate_entries1(i1 %a, i1 %b) {
+; CHECK-LABEL: define void @callbr_exit_with_separate_entries1(
+; CHECK-SAME: i1 [[A:%.*]], i1 [[B:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[A_INV:%.*]] = xor i1 [[A]], true
+; CHECK-NEXT: br label %[[IRR_GUARD:.*]]
+; CHECK: [[LOOP1:.*]]:
+; CHECK-NEXT: br i1 [[B]], label %[[CB:.*]], label %[[IRR_GUARD]]
+; CHECK: [[LOOP2:.*]]:
+; CHECK-NEXT: br label %[[LOOP1]]
+; CHECK: [[CB]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[EXIT:.*]] [label %cb.target.loop2]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+; CHECK: [[CB_TARGET_LOOP2:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_LOOP2:%.*]] = phi i1 [ true, %[[CB_TARGET_LOOP2]] ], [ true, %[[LOOP1]] ], [ [[A_INV]], %[[ENTRY]] ]
+; CHECK-NEXT: br i1 [[GUARD_LOOP2]], label %[[LOOP2]], label %[[LOOP1]]
+;
+entry:
+ br i1 %a, label %loop1, label %loop2
+loop1:
+ br i1 %b, label %cb, label %loop2
+loop2:
+ br label %loop1
+cb:
+ callbr void asm "", "!i"() to label %exit [label %loop2]
+exit:
+ ret void
+}
+
+; Irreducible loop: all blocks are callbrs, with cross-edges
+define void @callbr_only_multiple(i1 %a, i1 %b, i1 %c) {
+; CHECK-LABEL: define void @callbr_only_multiple(
+; CHECK-SAME: i1 [[A:%.*]], i1 [[B:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: callbr void asm "", "!i,!i"()
+; CHECK-NEXT: to label %[[ENTRY_TARGET_CB1:.*]] [label %[[ENTRY_TARGET_CB2:.*]], label %entry.target.cb3]
+; CHECK: [[CB1:.*]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[CB2:.*]] [label %cb1.target.cb3]
+; CHECK: [[CB2]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[CB2_TARGET_CB3:.*]] [label %cb2.target.cb1]
+; CHECK: [[CB3:.*]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[CB3_TARGET_CB1:.*]] [label %exit]
+; CHECK: [[EXIT:.*:]]
+; CHECK-NEXT: ret void
+; CHECK: [[CB2_TARGET_CB3]]:
+; CHECK-NEXT: br label %[[IRR_GUARD:.*]]
+; CHECK: [[CB1_TARGET_CB3:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[ENTRY_TARGET_CB1]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[ENTRY_TARGET_CB2]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[ENTRY_TARGET_CB3:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_CB3:%.*]] = phi i1 [ true, %[[CB2_TARGET_CB3]] ], [ true, %[[CB1_TARGET_CB3]] ], [ false, %[[ENTRY_TARGET_CB1]] ], [ false, %[[ENTRY_TARGET_CB2]] ], [ true, %[[ENTRY_TARGET_CB3]] ]
+; CHECK-NEXT: [[GUARD_CB1:%.*]] = phi i1 [ false, %[[CB2_TARGET_CB3]] ], [ false, %[[CB1_TARGET_CB3]] ], [ true, %[[ENTRY_TARGET_CB1]] ], [ false, %[[ENTRY_TARGET_CB2]] ], [ false, %[[ENTRY_TARGET_CB3]] ]
+; CHECK-NEXT: br i1 [[GUARD_CB3]], label %[[CB3]], label %[[IRR_GUARD1:.*]]
+; CHECK: [[IRR_GUARD1]]:
+; CHECK-NEXT: br label %[[IRR_GUARD2:.*]]
+; CHECK: [[CB2_TARGET_CB1:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD2]]
+; CHECK: [[CB3_TARGET_CB1]]:
+; CHECK-NEXT: br label %[[IRR_GUARD2]]
+; CHECK: [[IRR_GUARD2]]:
+; CHECK-NEXT: [[GUARD_CB13:%.*]] = phi i1 [ true, %[[CB2_TARGET_CB1]] ], [ [[GUARD_CB1]], %[[IRR_GUARD1]] ], [ true, %[[CB3_TARGET_CB1]] ]
+; CHECK-NEXT: br i1 [[GUARD_CB13]], label %[[CB1]], label %[[CB2]]
+;
+entry:
+ callbr void asm "", "!i,!i"() to label %cb1 [label %cb2, label %cb3]
+cb1:
+ callbr void asm "", "!i"() to label %cb2 [label %cb3]
+cb2:
+ callbr void asm "", "!i"() to label %cb3 [label %cb1]
+cb3:
+ callbr void asm "", "!i"() to label %cb1 [label %exit]
+exit:
+ ret void
+}
+
+; Irreducible loop: callbr as a "bypass" block
+define void @callbr_bypass(i1 %a, i1 %b, i1 %c) {
+; CHECK-LABEL: define void @callbr_bypass(
+; CHECK-SAME: i1 [[A:%.*]], i1 [[B:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[B_INV:%.*]] = xor i1 [[B]], true
+; CHECK-NEXT: [[A_INV:%.*]] = xor i1 [[A]], true
+; CHECK-NEXT: br label %[[IRR_GUARD:.*]]
+; CHECK: [[CB:.*]]:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[L2:.*]] [label %cb.target.l1]
+; CHECK: [[L1:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD1:.*]]
+; CHECK: [[L2]]:
+; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD1]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+; CHECK: [[CB_TARGET_L1:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_L1:%.*]] = phi i1 [ true, %[[CB_TARGET_L1]] ], [ [[A_INV]], %[[ENTRY]] ]
+; CHECK-NEXT: br i1 [[GUARD_L1]], label %[[L1]], label %[[IRR_GUARD1]]
+; CHECK: [[IRR_GUARD1]]:
+; CHECK-NEXT: [[GUARD_CB:%.*]] = phi i1 [ true, %[[L2]] ], [ true, %[[IRR_GUARD]] ], [ [[B_INV]], %[[L1]] ]
+; CHECK-NEXT: br i1 [[GUARD_CB]], label %[[CB]], label %[[L2]]
+;
+entry:
+ br i1 %a, label %cb, label %l1
+cb:
+ callbr void asm "", "!i"() to label %l2 [label %l1]
+l1:
+ br i1 %b, label %l2, label %cb
+l2:
+ br i1 %c, label %cb, label %exit
+exit:
+ ret void
+}
+
+; Irreducible loop: callbr with multiple indirect targets, some looping, some exiting
+define void @callbr_multiple_with_exit(i1 %a, i1 %b, i1 %c) {
+; CHECK-LABEL: define void @callbr_multiple_with_exit(
+; CHECK-SAME: i1 [[A:%.*]], i1 [[B:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: callbr void asm "", "!i,!i,!i"()
+; CHECK-NEXT: to label %[[ENTRY_TARGET_L1:.*]] [label %[[ENTRY_TARGET_L2:.*]], label %[[EXIT:.*]], label %entry.target.l3]
+; CHECK: [[L1:.*]]:
+; CHECK-NEXT: br i1 [[A]], label %[[L2:.*]], label %[[IRR_GUARD:.*]]
+; CHECK: [[L2]]:
+; CHECK-NEXT: br i1 [[B]], label %[[IRR_GUARD2:.*]], label %[[EXIT]]
+; CHECK: [[L3:.*]]:
+; CHECK-NEXT: br i1 [[C]], label %[[IRR_GUARD2]], label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+; CHECK: [[ENTRY_TARGET_L1]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[ENTRY_TARGET_L2]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[ENTRY_TARGET_L3:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_L3:%.*]] = phi i1 [ true, %[[L1]] ], [ false, %[[ENTRY_TARGET_L1]] ], [ false, %[[ENTRY_TARGET_L2]] ], [ true, %[[ENTRY_TARGET_L3]] ]
+; CHECK-NEXT: [[GUARD_L1:%.*]] = phi i1 [ false, %[[L1]] ], [ true, %[[ENTRY_TARGET_L1]] ], [ false, %[[ENTRY_TARGET_L2]] ], [ false, %[[ENTRY_TARGET_L3]] ]
+; CHECK-NEXT: br i1 [[GUARD_L3]], label %[[L3]], label %[[IRR_GUARD1:.*]]
+; CHECK: [[IRR_GUARD1]]:
+; CHECK-NEXT: br label %[[IRR_GUARD2]]
+; CHECK: [[IRR_GUARD2]]:
+; CHECK-NEXT: [[GUARD_L13:%.*]] = phi i1 [ true, %[[L2]] ], [ [[GUARD_L1]], %[[IRR_GUARD1]] ], [ true, %[[L3]] ]
+; CHECK-NEXT: br i1 [[GUARD_L13]], label %[[L1]], label %[[L2]]
+;
+entry:
+ callbr void asm "", "!i,!i,!i"() to label %l1 [label %l2, label %exit, label %l3]
+l1:
+ br i1 %a, label %l2, label %l3
+l2:
+ br i1 %b, label %l1, label %exit
+l3:
+ br i1 %c, label %l1, label %exit
+exit:
+ ret void
+}
+
+define void @callbr_nested(i1 %c, i1 %d) {
+; CHECK-LABEL: define void @callbr_nested(
+; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label %[[ENTRY_TARGET_H:.*]] [label %entry.target.b]
+; CHECK: [[H:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD1:.*]]
+; CHECK: [[B:.*]]:
+; CHECK-NEXT: callbr void asm "", "!i,!i"()
+; CHECK-NEXT: to label %[[H]] [label %[[B_TARGET_BH:.*]], label %b.target.bb]
+; CHECK: [[BH:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD:.*]]
+; CHECK: [[BB:.*]]:
+; CHECK-NEXT: br i1 [[C]], label %[[BH]], label %[[RET:.*]]
+; CHECK: [[RET]]:
+; CHECK-NEXT: ret void
+; CHECK: [[B_TARGET_BH]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[B_TARGET_BB:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD]]
+; CHECK: [[IRR_GUARD]]:
+; CHECK-NEXT: [[GUARD_BB:%.*]] = phi i1 [ true, %[[BH]] ], [ false, %[[B_TARGET_BH]] ], [ true, %[[B_TARGET_BB]] ]
+; CHECK-NEXT: br i1 [[GUARD_BB]], label %[[BB]], label %[[BH]]
+; CHECK: [[ENTRY_TARGET_H]]:
+; CHECK-NEXT: br label %[[IRR_GUARD1]]
+; CHECK: [[ENTRY_TARGET_B:.*]]:
+; CHECK-NEXT: br label %[[IRR_GUARD1]]
+; CHECK: [[IRR_GUARD1]]:
+; CHECK-NEXT: [[GUARD_B:%.*]] = phi i1 [ true, %[[H]] ], [ false, %[[ENTRY_TARGET_H]] ], [ true, %[[ENTRY_TARGET_B]] ]
+; CHECK-NEXT: br i1 [[GUARD_B]], label %[[B]], label %[[H]]
+;
+entry:
+ callbr void asm "","!i"() to label %h [label %b]
+h:
+ br label %b
+b:
+ callbr void asm "","!i,!i"() to label %h [label %bh, label %bb]
+bh:
+ br label %bb
+bb:
+ br i1 %c, label %bh, label %ret
+ret:
+ ret void
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; LOOPS-AFTER: {{.*}}
+; LOOPS-BEFORE: {{.*}}
diff --git a/llvm/test/Transforms/FixIrreducible/nested.ll b/llvm/test/Transforms/FixIrreducible/nested.ll
index 0cc6b47..c9161cc1 100644
--- a/llvm/test/Transforms/FixIrreducible/nested.ll
+++ b/llvm/test/Transforms/FixIrreducible/nested.ll
@@ -50,6 +50,69 @@ exit:
ret void
}
+define void @nested_irr_top_level_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5) {
+; CHECK-LABEL: @nested_irr_top_level_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]])
+; CHECK-NEXT: to label [[ENTRY_TARGET_A1:%.*]] [label %entry.target.A2]
+; CHECK: A1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]])
+; CHECK-NEXT: to label [[A1_TARGET_B1:%.*]] [label %A1.target.B2]
+; CHECK: B1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]])
+; CHECK-NEXT: to label [[B1_TARGET_B2:%.*]] [label %A3]
+; CHECK: B2:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED3:%.*]])
+; CHECK-NEXT: to label [[B1:%.*]] [label %A3]
+; CHECK: A3:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED4:%.*]])
+; CHECK-NEXT: to label [[A3_TARGET_A2:%.*]] [label %exit]
+; CHECK: A2:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED5:%.*]])
+; CHECK-NEXT: to label [[A1:%.*]] [label %exit]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+; CHECK: A3.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD:%.*]]
+; CHECK: entry.target.A1:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: entry.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: irr.guard:
+; CHECK-NEXT: [[GUARD_A2:%.*]] = phi i1 [ true, [[A3_TARGET_A2]] ], [ false, [[ENTRY_TARGET_A1]] ], [ true, [[ENTRY_TARGET_A2:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_A2]], label [[A2:%.*]], label [[A1]]
+; CHECK: B1.target.B2:
+; CHECK-NEXT: br label [[IRR_GUARD1:%.*]]
+; CHECK: A1.target.B1:
+; CHECK-NEXT: br label [[IRR_GUARD1]]
+; CHECK: A1.target.B2:
+; CHECK-NEXT: br label [[IRR_GUARD1]]
+; CHECK: irr.guard1:
+; CHECK-NEXT: [[GUARD_B2:%.*]] = phi i1 [ true, [[B1_TARGET_B2]] ], [ false, [[A1_TARGET_B1]] ], [ true, [[A1_TARGET_B2:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_B2]], label [[B2:%.*]], label [[B1]]
+;
+entry:
+ callbr void asm "", "r,!i"(i1 %Pred0) to label %A1 [label %A2]
+
+A1:
+ callbr void asm "", "r,!i"(i1 %Pred1) to label %B1 [label %B2]
+
+B1:
+ callbr void asm "", "r,!i"(i1 %Pred2) to label %B2 [label %A3]
+
+B2:
+ callbr void asm "", "r,!i"(i1 %Pred3) to label %B1 [label %A3]
+
+A3:
+ callbr void asm "", "r,!i"(i1 %Pred4) to label %A2 [label %exit]
+
+A2:
+ callbr void asm "", "r,!i"(i1 %Pred5) to label %A1 [label %exit]
+
+exit:
+ ret void
+}
+
define void @nested_irr_in_loop(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6) {
; CHECK-LABEL: @nested_irr_in_loop(
; CHECK-NEXT: entry:
@@ -107,6 +170,80 @@ exit:
ret void
}
+define void @nested_irr_in_loop_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6) {
+; CHECK-LABEL: @nested_irr_in_loop_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[H1:%.*]]
+; CHECK: H1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]])
+; CHECK-NEXT: to label [[H1_TARGET_A1:%.*]] [label %H1.target.A2]
+; CHECK: A1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]])
+; CHECK-NEXT: to label [[A1_TARGET_B1:%.*]] [label %A1.target.B2]
+; CHECK: B1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]])
+; CHECK-NEXT: to label [[B1_TARGET_B2:%.*]] [label %A3]
+; CHECK: B2:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED3:%.*]])
+; CHECK-NEXT: to label [[B1:%.*]] [label %A3]
+; CHECK: A3:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED4:%.*]])
+; CHECK-NEXT: to label [[A3_TARGET_A2:%.*]] [label %L1]
+; CHECK: A2:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED5:%.*]])
+; CHECK-NEXT: to label [[A1:%.*]] [label %L1]
+; CHECK: L1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED6:%.*]])
+; CHECK-NEXT: to label [[EXIT:%.*]] [label %H1]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+; CHECK: A3.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD:%.*]]
+; CHECK: H1.target.A1:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: H1.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: irr.guard:
+; CHECK-NEXT: [[GUARD_A2:%.*]] = phi i1 [ true, [[A3_TARGET_A2]] ], [ false, [[H1_TARGET_A1]] ], [ true, [[H1_TARGET_A2:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_A2]], label [[A2:%.*]], label [[A1]]
+; CHECK: B1.target.B2:
+; CHECK-NEXT: br label [[IRR_GUARD1:%.*]]
+; CHECK: A1.target.B1:
+; CHECK-NEXT: br label [[IRR_GUARD1]]
+; CHECK: A1.target.B2:
+; CHECK-NEXT: br label [[IRR_GUARD1]]
+; CHECK: irr.guard1:
+; CHECK-NEXT: [[GUARD_B2:%.*]] = phi i1 [ true, [[B1_TARGET_B2]] ], [ false, [[A1_TARGET_B1]] ], [ true, [[A1_TARGET_B2:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_B2]], label [[B2:%.*]], label [[B1]]
+;
+entry:
+ br label %H1
+
+H1:
+ callbr void asm "", "r,!i"(i1 %Pred0) to label %A1 [label %A2]
+
+A1:
+ callbr void asm "", "r,!i"(i1 %Pred1) to label %B1 [label %B2]
+
+B1:
+ callbr void asm "", "r,!i"(i1 %Pred2) to label %B2 [label %A3]
+
+B2:
+ callbr void asm "", "r,!i"(i1 %Pred3) to label %B1 [label %A3]
+
+A3:
+ callbr void asm "", "r,!i"(i1 %Pred4) to label %A2 [label %L1]
+
+A2:
+ callbr void asm "", "r,!i"(i1 %Pred5) to label %A1 [label %L1]
+
+L1:
+ callbr void asm "", "r,!i"(i1 %Pred6) to label %exit [label %H1]
+
+exit:
+ ret void
+}
+
define void @loop_in_irr(i1 %Pred0, i1 %Pred1, i1 %Pred2) {
; CHECK-LABEL: @loop_in_irr(
; CHECK-NEXT: entry:
@@ -150,6 +287,60 @@ exit:
ret void
}
+define void @loop_in_irr_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2) {
+; CHECK-LABEL: @loop_in_irr_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]])
+; CHECK-NEXT: to label [[ENTRY_TARGET_A1:%.*]] [label %entry.target.A2]
+; CHECK: A1:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[H1:%.*]] []
+; CHECK: H1:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[L1:%.*]] []
+; CHECK: L1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]])
+; CHECK-NEXT: to label [[H1]] [label %A3]
+; CHECK: A3:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]])
+; CHECK-NEXT: to label [[A3_TARGET_A2:%.*]] [label %exit]
+; CHECK: A2:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[A1:%.*]] []
+; CHECK: exit:
+; CHECK-NEXT: ret void
+; CHECK: A3.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD:%.*]]
+; CHECK: entry.target.A1:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: entry.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: irr.guard:
+; CHECK-NEXT: [[GUARD_A2:%.*]] = phi i1 [ true, [[A3_TARGET_A2]] ], [ false, [[ENTRY_TARGET_A1]] ], [ true, [[ENTRY_TARGET_A2:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_A2]], label [[A2:%.*]], label [[A1]]
+;
+entry:
+ callbr void asm "", "r,!i"(i1 %Pred0) to label %A1 [label %A2]
+
+A1:
+ callbr void asm "", ""() to label %H1 []
+
+H1:
+ callbr void asm "", ""() to label %L1 []
+
+L1:
+ callbr void asm "", "r,!i"(i1 %Pred1) to label %H1 [label %A3]
+
+A3:
+ callbr void asm "", "r,!i"(i1 %Pred2) to label %A2 [label %exit]
+
+A2:
+ callbr void asm "", ""() to label %A1 []
+
+exit:
+ ret void
+}
+
define void @loop_in_irr_shared_entry(i1 %Pred0, i1 %Pred1, i1 %Pred2) {
; CHECK-LABEL: @loop_in_irr_shared_entry(
; CHECK-NEXT: entry:
@@ -188,6 +379,54 @@ exit:
ret void
}
+define void @loop_in_irr_shared_entry_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2) {
+; CHECK-LABEL: @loop_in_irr_shared_entry_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]])
+; CHECK-NEXT: to label [[ENTRY_TARGET_H1:%.*]] [label %entry.target.A2]
+; CHECK: H1:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[L1:%.*]] []
+; CHECK: L1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]])
+; CHECK-NEXT: to label [[H1:%.*]] [label %A3]
+; CHECK: A3:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]])
+; CHECK-NEXT: to label [[A3_TARGET_A2:%.*]] [label %exit]
+; CHECK: A2:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[H1]] []
+; CHECK: exit:
+; CHECK-NEXT: ret void
+; CHECK: A3.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD:%.*]]
+; CHECK: entry.target.H1:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: entry.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: irr.guard:
+; CHECK-NEXT: [[GUARD_A2:%.*]] = phi i1 [ true, [[A3_TARGET_A2]] ], [ false, [[ENTRY_TARGET_H1]] ], [ true, [[ENTRY_TARGET_A2:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_A2]], label [[A2:%.*]], label [[H1]]
+;
+entry:
+ callbr void asm "", "r,!i"(i1 %Pred0) to label %H1 [label %A2]
+
+H1:
+ callbr void asm "", ""() to label %L1 []
+
+L1:
+ callbr void asm "", "r,!i"(i1 %Pred1) to label %H1 [label %A3]
+
+A3:
+ callbr void asm "", "r,!i"(i1 %Pred2) to label %A2 [label %exit]
+
+A2:
+ callbr void asm "", ""() to label %H1 []
+
+exit:
+ ret void
+}
+
define void @loop_in_irr_shared_header(i1 %Pred0, i1 %Pred1, i1 %Pred2) {
; CHECK-LABEL: @loop_in_irr_shared_header(
; CHECK-NEXT: entry:
@@ -226,6 +465,56 @@ exit:
ret void
}
+define void @loop_in_irr_shared_header_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2) {
+; CHECK-LABEL: @loop_in_irr_shared_header_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]])
+; CHECK-NEXT: to label [[ENTRY_TARGET_A2:%.*]] [label %entry.target.H1]
+; CHECK: H1:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[L1:%.*]] []
+; CHECK: L1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]])
+; CHECK-NEXT: to label [[L1_TARGET_H1:%.*]] [label %A3]
+; CHECK: A3:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]])
+; CHECK-NEXT: to label [[A2:%.*]] [label %exit]
+; CHECK: A2:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[A2_TARGET_H1:%.*]] []
+; CHECK: exit:
+; CHECK-NEXT: ret void
+; CHECK: A2.target.H1:
+; CHECK-NEXT: br label [[IRR_GUARD:%.*]]
+; CHECK: L1.target.H1:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: entry.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: entry.target.H1:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: irr.guard:
+; CHECK-NEXT: [[GUARD_H1:%.*]] = phi i1 [ true, [[A2_TARGET_H1]] ], [ true, [[L1_TARGET_H1]] ], [ false, [[ENTRY_TARGET_A2]] ], [ true, [[ENTRY_TARGET_H1:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_H1]], label [[H1:%.*]], label [[A2]]
+;
+entry:
+ callbr void asm "", "r,!i"(i1 %Pred0) to label %A2 [label %H1]
+
+H1:
+ callbr void asm "", ""() to label %L1 []
+
+L1:
+ callbr void asm "", "r,!i"(i1 %Pred1) to label %H1 [label %A3]
+
+A3:
+ callbr void asm "", "r,!i"(i1 %Pred2) to label %A2 [label %exit]
+
+A2:
+ callbr void asm "", ""() to label %H1 []
+
+exit:
+ ret void
+}
+
define void @loop_irr_loop_shared_header(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3) {
; CHECK-LABEL: @loop_irr_loop_shared_header(
; CHECK-NEXT: entry:
@@ -269,6 +558,62 @@ exit:
ret void
}
+define void @loop_irr_loop_shared_header_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3) {
+; CHECK-LABEL: @loop_irr_loop_shared_header_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[H2:%.*]] []
+; CHECK: H2:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]])
+; CHECK-NEXT: to label [[H2_TARGET_A2:%.*]] [label %H2.target.H1]
+; CHECK: H1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]])
+; CHECK-NEXT: to label [[A3:%.*]] [label %H1.target.H1]
+; CHECK: A3:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]])
+; CHECK-NEXT: to label [[A2:%.*]] [label %L2]
+; CHECK: A2:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[A2_TARGET_H1:%.*]] []
+; CHECK: L2:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED3:%.*]])
+; CHECK-NEXT: to label [[H2]] [label %exit]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+; CHECK: A2.target.H1:
+; CHECK-NEXT: br label [[IRR_GUARD:%.*]]
+; CHECK: H1.target.H1:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: H2.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: H2.target.H1:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: irr.guard:
+; CHECK-NEXT: [[GUARD_H1:%.*]] = phi i1 [ true, [[A2_TARGET_H1]] ], [ true, [[H1_TARGET_H1:%.*]] ], [ false, [[H2_TARGET_A2]] ], [ true, [[H2_TARGET_H1:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_H1]], label [[H1:%.*]], label [[A2]]
+;
+entry:
+ callbr void asm "", ""() to label %H2 []
+
+H2:
+ callbr void asm "", "r,!i"(i1 %Pred0) to label %A2 [label %H1]
+
+H1:
+ callbr void asm "", "r,!i"(i1 %Pred1) to label %A3 [label %H1]
+
+A3:
+ callbr void asm "", "r,!i"(i1 %Pred2) to label %A2 [label %L2]
+
+A2:
+ callbr void asm "", ""() to label %H1 []
+
+L2:
+ callbr void asm "", "r,!i"(i1 %Pred3) to label %H2 [label %exit]
+
+exit:
+ ret void
+}
+
define void @siblings_top_level(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6) {
; CHECK-LABEL: @siblings_top_level(
; CHECK-NEXT: entry:
@@ -336,6 +681,93 @@ exit:
ret void
}
+define void @siblings_top_level_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6) {
+; CHECK-LABEL: @siblings_top_level_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]])
+; CHECK-NEXT: to label [[H1:%.*]] [label %fork1]
+; CHECK: H1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]])
+; CHECK-NEXT: to label [[H1_TARGET_A1:%.*]] [label %H1.target.A2]
+; CHECK: A1:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[A1_TARGET_A2:%.*]] []
+; CHECK: A2:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]])
+; CHECK-NEXT: to label [[A1:%.*]] [label %L1]
+; CHECK: L1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED3:%.*]])
+; CHECK-NEXT: to label [[H1]] [label %exit]
+; CHECK: fork1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED4:%.*]])
+; CHECK-NEXT: to label [[FORK1_TARGET_B1:%.*]] [label %fork1.target.B2]
+; CHECK: B1:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[H2:%.*]] []
+; CHECK: H2:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[L2:%.*]] []
+; CHECK: L2:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED5:%.*]])
+; CHECK-NEXT: to label [[H2]] [label %L2.target.B2]
+; CHECK: B2:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED6:%.*]])
+; CHECK-NEXT: to label [[B1:%.*]] [label %exit]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+; CHECK: A1.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD:%.*]]
+; CHECK: H1.target.A1:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: H1.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: irr.guard:
+; CHECK-NEXT: [[GUARD_A2:%.*]] = phi i1 [ true, [[A1_TARGET_A2]] ], [ false, [[H1_TARGET_A1]] ], [ true, [[H1_TARGET_A2:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_A2]], label [[A2:%.*]], label [[A1]]
+; CHECK: L2.target.B2:
+; CHECK-NEXT: br label [[IRR_GUARD1:%.*]]
+; CHECK: fork1.target.B1:
+; CHECK-NEXT: br label [[IRR_GUARD1]]
+; CHECK: fork1.target.B2:
+; CHECK-NEXT: br label [[IRR_GUARD1]]
+; CHECK: irr.guard1:
+; CHECK-NEXT: [[GUARD_B2:%.*]] = phi i1 [ true, [[L2_TARGET_B2:%.*]] ], [ false, [[FORK1_TARGET_B1]] ], [ true, [[FORK1_TARGET_B2:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_B2]], label [[B2:%.*]], label [[B1]]
+;
+entry:
+ callbr void asm "", "r,!i"(i1 %Pred0) to label %H1 [label %fork1]
+
+H1:
+ callbr void asm "", "r,!i"(i1 %Pred1) to label %A1 [label %A2]
+
+A1:
+ callbr void asm "", ""() to label %A2 []
+
+A2:
+ callbr void asm "", "r,!i"(i1 %Pred2) to label %A1 [label %L1]
+
+L1:
+ callbr void asm "", "r,!i"(i1 %Pred3) to label %H1 [label %exit]
+
+fork1:
+ callbr void asm "", "r,!i"(i1 %Pred4) to label %B1 [label %B2]
+
+B1:
+ callbr void asm "", ""() to label %H2 []
+
+H2:
+ callbr void asm "", ""() to label %L2 []
+
+L2:
+ callbr void asm "", "r,!i"(i1 %Pred5) to label %H2 [label %B2]
+
+B2:
+ callbr void asm "", "r,!i"(i1 %Pred6) to label %B1 [label %exit]
+
+exit:
+ ret void
+}
+
define void @siblings_in_loop(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6, i1 %Pred7) {
; CHECK-LABEL: @siblings_in_loop(
; CHECK-NEXT: entry:
@@ -413,6 +845,105 @@ exit:
ret void
}
+define void @siblings_in_loop_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6, i1 %Pred7) {
+; CHECK-LABEL: @siblings_in_loop_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[H0:%.*]] []
+; CHECK: H0:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]])
+; CHECK-NEXT: to label [[H1:%.*]] [label %fork1]
+; CHECK: H1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]])
+; CHECK-NEXT: to label [[H1_TARGET_A1:%.*]] [label %H1.target.A2]
+; CHECK: A1:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[A1_TARGET_A2:%.*]] []
+; CHECK: A2:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]])
+; CHECK-NEXT: to label [[A1:%.*]] [label %L1]
+; CHECK: L1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED3:%.*]])
+; CHECK-NEXT: to label [[H1]] [label %L0]
+; CHECK: fork1:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED4:%.*]])
+; CHECK-NEXT: to label [[FORK1_TARGET_B1:%.*]] [label %fork1.target.B2]
+; CHECK: B1:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[H2:%.*]] []
+; CHECK: H2:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[L2:%.*]] []
+; CHECK: L2:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED5:%.*]])
+; CHECK-NEXT: to label [[H2]] [label %L2.target.B2]
+; CHECK: B2:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED6:%.*]])
+; CHECK-NEXT: to label [[B1:%.*]] [label %L0]
+; CHECK: L0:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED7:%.*]])
+; CHECK-NEXT: to label [[EXIT:%.*]] [label %H0]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+; CHECK: A1.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD:%.*]]
+; CHECK: H1.target.A1:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: H1.target.A2:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: irr.guard:
+; CHECK-NEXT: [[GUARD_A2:%.*]] = phi i1 [ true, [[A1_TARGET_A2]] ], [ false, [[H1_TARGET_A1]] ], [ true, [[H1_TARGET_A2:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_A2]], label [[A2:%.*]], label [[A1]]
+; CHECK: L2.target.B2:
+; CHECK-NEXT: br label [[IRR_GUARD1:%.*]]
+; CHECK: fork1.target.B1:
+; CHECK-NEXT: br label [[IRR_GUARD1]]
+; CHECK: fork1.target.B2:
+; CHECK-NEXT: br label [[IRR_GUARD1]]
+; CHECK: irr.guard1:
+; CHECK-NEXT: [[GUARD_B2:%.*]] = phi i1 [ true, [[L2_TARGET_B2:%.*]] ], [ false, [[FORK1_TARGET_B1]] ], [ true, [[FORK1_TARGET_B2:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_B2]], label [[B2:%.*]], label [[B1]]
+;
+entry:
+ callbr void asm "", ""() to label %H0 []
+
+H0:
+ callbr void asm "", "r,!i"(i1 %Pred0) to label %H1 [label %fork1]
+
+H1:
+ callbr void asm "", "r,!i"(i1 %Pred1) to label %A1 [label %A2]
+
+A1:
+ callbr void asm "", ""() to label %A2 []
+
+A2:
+ callbr void asm "", "r,!i"(i1 %Pred2) to label %A1 [label %L1]
+
+L1:
+ callbr void asm "", "r,!i"(i1 %Pred3) to label %H1 [label %L0]
+
+fork1:
+ callbr void asm "", "r,!i"(i1 %Pred4) to label %B1 [label %B2]
+
+B1:
+ callbr void asm "", ""() to label %H2 []
+
+H2:
+ callbr void asm "", ""() to label %L2 []
+
+L2:
+ callbr void asm "", "r,!i"(i1 %Pred5) to label %H2 [label %B2]
+
+B2:
+ callbr void asm "", "r,!i"(i1 %Pred6) to label %B1 [label %L0]
+
+L0:
+ callbr void asm "", "r,!i"(i1 %Pred7) to label %exit [label %H0]
+
+exit:
+ ret void
+}
+
define void @irr_in_irr_shared_entry(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6, i1 %Pred7, i1 %Pred8, i1 %Pred9, i1 %Pred10, i1 %Pred11, i1 %Pred12, i1 %Pred13) {
; CHECK-LABEL: @irr_in_irr_shared_entry(
; CHECK-NEXT: entry:
@@ -527,3 +1058,148 @@ if.end8.i:
exit:
ret void
}
+
+define void @irr_in_irr_shared_entry_callbr(i1 %Pred0, i1 %Pred1, i1 %Pred2, i1 %Pred3, i1 %Pred4, i1 %Pred5, i1 %Pred6, i1 %Pred7, i1 %Pred8, i1 %Pred9, i1 %Pred10, i1 %Pred11, i1 %Pred12, i1 %Pred13) {
+; CHECK-LABEL: @irr_in_irr_shared_entry_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED0:%.*]])
+; CHECK-NEXT: to label [[IF_END:%.*]] [label %if.then]
+; CHECK: if.end:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED1:%.*]])
+; CHECK-NEXT: to label [[IF_THEN7:%.*]] [label %if.else]
+; CHECK: if.then7:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[IF_END16:%.*]] []
+; CHECK: if.else:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[IF_END16]] []
+; CHECK: if.end16:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED2:%.*]])
+; CHECK-NEXT: to label [[WHILE_COND_PREHEADER:%.*]] [label %if.then39]
+; CHECK: while.cond.preheader:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[WHILE_COND:%.*]] []
+; CHECK: while.cond:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED3:%.*]])
+; CHECK-NEXT: to label [[WHILE_COND_TARGET_COND_TRUE49:%.*]] [label %lor.rhs]
+; CHECK: cond.true49:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED4:%.*]])
+; CHECK-NEXT: to label [[IF_THEN69:%.*]] [label %cond.true49.target.while.body63]
+; CHECK: while.body63:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED5:%.*]])
+; CHECK-NEXT: to label [[EXIT:%.*]] [label %while.cond47]
+; CHECK: while.cond47:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED6:%.*]])
+; CHECK-NEXT: to label [[COND_TRUE49:%.*]] [label %while.cond47.target.cond.end61]
+; CHECK: cond.end61:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED7:%.*]])
+; CHECK-NEXT: to label [[COND_END61_TARGET_WHILE_BODY63:%.*]] [label %while.cond]
+; CHECK: if.then69:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED8:%.*]])
+; CHECK-NEXT: to label [[EXIT]] [label %while.cond]
+; CHECK: lor.rhs:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED9:%.*]])
+; CHECK-NEXT: to label [[LOR_RHS_TARGET_COND_END61:%.*]] [label %while.end76]
+; CHECK: while.end76:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[EXIT]] []
+; CHECK: if.then39:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED10:%.*]])
+; CHECK-NEXT: to label [[EXIT]] [label %if.end.i145]
+; CHECK: if.end.i145:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED11:%.*]])
+; CHECK-NEXT: to label [[EXIT]] [label %if.end8.i149]
+; CHECK: if.end8.i149:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[EXIT]] []
+; CHECK: if.then:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED12:%.*]])
+; CHECK-NEXT: to label [[EXIT]] [label %if.end.i]
+; CHECK: if.end.i:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PRED13:%.*]])
+; CHECK-NEXT: to label [[EXIT]] [label %if.end8.i]
+; CHECK: if.end8.i:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[EXIT]] []
+; CHECK: exit:
+; CHECK-NEXT: ret void
+; CHECK: while.cond47.target.cond.end61:
+; CHECK-NEXT: br label [[IRR_GUARD:%.*]]
+; CHECK: lor.rhs.target.cond.end61:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: while.cond.target.cond.true49:
+; CHECK-NEXT: br label [[IRR_GUARD]]
+; CHECK: irr.guard:
+; CHECK-NEXT: [[GUARD_COND_END61:%.*]] = phi i1 [ true, [[WHILE_COND47_TARGET_COND_END61:%.*]] ], [ true, [[LOR_RHS_TARGET_COND_END61]] ], [ false, [[WHILE_COND_TARGET_COND_TRUE49]] ]
+; CHECK-NEXT: br i1 [[GUARD_COND_END61]], label [[COND_END61:%.*]], label [[IRR_GUARD1:%.*]]
+; CHECK: cond.true49.target.while.body63:
+; CHECK-NEXT: br label [[IRR_GUARD1]]
+; CHECK: cond.end61.target.while.body63:
+; CHECK-NEXT: br label [[IRR_GUARD1]]
+; CHECK: irr.guard1:
+; CHECK-NEXT: [[GUARD_WHILE_BODY63:%.*]] = phi i1 [ true, [[COND_TRUE49_TARGET_WHILE_BODY63:%.*]] ], [ true, [[COND_END61_TARGET_WHILE_BODY63]] ], [ false, [[IRR_GUARD]] ]
+; CHECK-NEXT: br i1 [[GUARD_WHILE_BODY63]], label [[WHILE_BODY63:%.*]], label [[COND_TRUE49]]
+;
+entry:
+ callbr void asm "", "r,!i"(i1 %Pred0) to label %if.end [label %if.then]
+
+if.end:
+ callbr void asm "", "r,!i"(i1 %Pred1) to label %if.then7 [label %if.else]
+
+if.then7:
+ callbr void asm "", ""() to label %if.end16 []
+
+if.else:
+ callbr void asm "", ""() to label %if.end16 []
+
+if.end16:
+ callbr void asm "", "r,!i"(i1 %Pred2) to label %while.cond.preheader [label %if.then39]
+
+while.cond.preheader:
+ callbr void asm "", ""() to label %while.cond []
+
+while.cond:
+ callbr void asm "", "r,!i"(i1 %Pred3) to label %cond.true49 [label %lor.rhs]
+
+cond.true49:
+ callbr void asm "", "r,!i"(i1 %Pred4) to label %if.then69 [label %while.body63]
+
+while.body63:
+ callbr void asm "", "r,!i"(i1 %Pred5) to label %exit [label %while.cond47]
+
+while.cond47:
+ callbr void asm "", "r,!i"(i1 %Pred6) to label %cond.true49 [label %cond.end61]
+
+cond.end61:
+ callbr void asm "", "r,!i"(i1 %Pred7) to label %while.body63 [label %while.cond]
+
+if.then69:
+ callbr void asm "", "r,!i"(i1 %Pred8) to label %exit [label %while.cond]
+
+lor.rhs:
+ callbr void asm "", "r,!i"(i1 %Pred9) to label %cond.end61 [label %while.end76]
+
+while.end76:
+ callbr void asm "", ""() to label %exit []
+
+if.then39:
+ callbr void asm "", "r,!i"(i1 %Pred10) to label %exit [label %if.end.i145]
+
+if.end.i145:
+ callbr void asm "", "r,!i"(i1 %Pred11) to label %exit [label %if.end8.i149]
+
+if.end8.i149:
+ callbr void asm "", ""() to label %exit []
+
+if.then:
+ callbr void asm "", "r,!i"(i1 %Pred12) to label %exit [label %if.end.i]
+
+if.end.i:
+ callbr void asm "", "r,!i"(i1 %Pred13) to label %exit [label %if.end8.i]
+
+if.end8.i:
+ callbr void asm "", ""() to label %exit []
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/FixIrreducible/unreachable.ll b/llvm/test/Transforms/FixIrreducible/unreachable.ll
index defbefb..845cf50 100644
--- a/llvm/test/Transforms/FixIrreducible/unreachable.ll
+++ b/llvm/test/Transforms/FixIrreducible/unreachable.ll
@@ -25,3 +25,26 @@ loop.latch:
loop.exit:
ret void
}
+
+; CHECK-LABEL: @unreachable_callbr(
+; CHECK: entry:
+; CHECK-NOT: irr.guard:
+define void @unreachable_callbr(i32 %n, i1 %arg) {
+entry:
+ callbr void asm "", ""() to label %loop.body []
+
+loop.body:
+ callbr void asm "", ""() to label %inner.block []
+
+unreachable.block:
+ callbr void asm "", ""() to label %inner.block []
+
+inner.block:
+ callbr void asm "", "r,!i"(i1 %arg) to label %loop.exit [label %loop.latch]
+
+loop.latch:
+ callbr void asm "", ""() to label %loop.body []
+
+loop.exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/GVN/assume-equal.ll b/llvm/test/Transforms/GVN/assume-equal.ll
index bbbc5c5..a389801 100644
--- a/llvm/test/Transforms/GVN/assume-equal.ll
+++ b/llvm/test/Transforms/GVN/assume-equal.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -passes=gvn -S | FileCheck %s
+target datalayout = "p1:64:64:64:32"
+
%struct.A = type { ptr }
@_ZTV1A = available_externally unnamed_addr constant [4 x ptr] [ptr null, ptr @_ZTI1A, ptr @_ZN1A3fooEv, ptr @_ZN1A3barEv], align 8
@_ZTI1A = external constant ptr
@@ -372,6 +374,20 @@ define i1 @assume_ptr_eq_different_prov_does_not_matter_icmp(ptr %p, ptr %p2) {
ret i1 %c
}
+define i1 @assume_ptr_eq_different_prov_does_not_matter_icmp_addrsize(ptr addrspace(1) %p, ptr addrspace(1) %p2) {
+; CHECK-LABEL: define i1 @assume_ptr_eq_different_prov_does_not_matter_icmp_addrsize(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], ptr addrspace(1) [[P2:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[P]], [[P2]]
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[C:%.*]] = icmp eq ptr addrspace(1) [[P]], null
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %cmp = icmp eq ptr addrspace(1) %p, %p2
+ call void @llvm.assume(i1 %cmp)
+ %c = icmp eq ptr addrspace(1) %p2, null
+ ret i1 %c
+}
+
; This is not correct, as it may change the provenance exposed by ptrtoint.
; We still allow it for now.
define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoint(ptr %p, ptr %p2) {
@@ -388,6 +404,20 @@ define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoint(ptr %p, ptr %p
ret i64 %int
}
+define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoint_addrsize(ptr addrspace(1) %p, ptr addrspace(1) %p2) {
+; CHECK-LABEL: define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoint_addrsize(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], ptr addrspace(1) [[P2:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[P]], [[P2]]
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[INT:%.*]] = ptrtoint ptr addrspace(1) [[P]] to i64
+; CHECK-NEXT: ret i64 [[INT]]
+;
+ %cmp = icmp eq ptr addrspace(1) %p, %p2
+ call void @llvm.assume(i1 %cmp)
+ %int = ptrtoint ptr addrspace(1) %p2 to i64
+ ret i64 %int
+}
+
define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoaddr(ptr %p, ptr %p2) {
; CHECK-LABEL: define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoaddr(
; CHECK-SAME: ptr [[P:%.*]], ptr [[P2:%.*]]) {
@@ -402,6 +432,20 @@ define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoaddr(ptr %p, ptr %
ret i64 %int
}
+define i32 @assume_ptr_eq_different_prov_does_not_matter_ptrtoaddr_addrsize(ptr addrspace(1) %p, ptr addrspace(1) %p2) {
+; CHECK-LABEL: define i32 @assume_ptr_eq_different_prov_does_not_matter_ptrtoaddr_addrsize(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], ptr addrspace(1) [[P2:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(1) [[P]], [[P2]]
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[INT:%.*]] = ptrtoaddr ptr addrspace(1) [[P]] to i32
+; CHECK-NEXT: ret i32 [[INT]]
+;
+ %cmp = icmp eq ptr addrspace(1) %p, %p2
+ call void @llvm.assume(i1 %cmp)
+ %int = ptrtoaddr ptr addrspace(1) %p2 to i32
+ ret i32 %int
+}
+
define i8 @assume_ptr_eq_same_prov(ptr %p, i64 %x) {
; CHECK-LABEL: define i8 @assume_ptr_eq_same_prov(
; CHECK-SAME: ptr [[P:%.*]], i64 [[X:%.*]]) {
diff --git a/llvm/test/Transforms/IndVarSimplify/AMDGPU/addrspace-7-doesnt-crash.ll b/llvm/test/Transforms/IndVarSimplify/AMDGPU/addrspace-7-doesnt-crash.ll
index 08dcf1d..8e932e0 100644
--- a/llvm/test/Transforms/IndVarSimplify/AMDGPU/addrspace-7-doesnt-crash.ll
+++ b/llvm/test/Transforms/IndVarSimplify/AMDGPU/addrspace-7-doesnt-crash.ll
@@ -7,11 +7,11 @@ define void @f(ptr addrspace(7) %arg) {
; CHECK-LABEL: define void @f
; CHECK-SAME: (ptr addrspace(7) [[ARG:%.*]]) {
; CHECK-NEXT: bb:
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr addrspace(7) [[ARG]], i32 8
; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK: bb1:
; CHECK-NEXT: br i1 false, label [[BB2:%.*]], label [[BB1]]
; CHECK: bb2:
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr addrspace(7) [[ARG]], i32 8
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb3:
; CHECK-NEXT: [[I4:%.*]] = load i32, ptr addrspace(7) [[SCEVGEP]], align 4
diff --git a/llvm/test/Transforms/IndVarSimplify/ARM/code-size.ll b/llvm/test/Transforms/IndVarSimplify/ARM/code-size.ll
index 2003b1a..3c6535d 100644
--- a/llvm/test/Transforms/IndVarSimplify/ARM/code-size.ll
+++ b/llvm/test/Transforms/IndVarSimplify/ARM/code-size.ll
@@ -4,33 +4,31 @@
define i32 @remove_loop(i32 %size) #0 {
; CHECK-V8M-LABEL: @remove_loop(
-; CHECK-V8M-SAME: i32 [[SIZE:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-V8M-NEXT: entry:
-; CHECK-V8M-NEXT: br label %[[WHILE_COND:.*]]
-; CHECK-V8M: while.cond:
-; CHECK-V8M-NEXT: br i1 false, label %[[WHILE_COND]], label %[[WHILE_END:.*]]
-; CHECK-V8M: while.end:
-; CHECK-V8M-NEXT: [[TMP0:%.*]] = add i32 [[SIZE]], 31
+; CHECK-V8M-NEXT: [[TMP0:%.*]] = add i32 [[SIZE:%.*]], 31
; CHECK-V8M-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SIZE]], i32 31)
; CHECK-V8M-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[UMIN]]
; CHECK-V8M-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 5
; CHECK-V8M-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 5
; CHECK-V8M-NEXT: [[TMP4:%.*]] = sub i32 [[SIZE]], [[TMP3]]
+; CHECK-V8M-NEXT: br label [[WHILE_COND:%.*]]
+; CHECK-V8M: while.cond:
+; CHECK-V8M-NEXT: br i1 false, label [[WHILE_COND]], label [[WHILE_END:%.*]]
+; CHECK-V8M: while.end:
; CHECK-V8M-NEXT: ret i32 [[TMP4]]
;
; CHECK-V8A-LABEL: @remove_loop(
-; CHECK-V8A-SAME: i32 [[SIZE:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-V8A-NEXT: entry:
-; CHECK-V8A-NEXT: br label %[[WHILE_COND:.*]]
-; CHECK-V8A: while.cond:
-; CHECK-V8A-NEXT: br i1 false, label %[[WHILE_COND]], label %[[WHILE_END:.*]]
-; CHECK-V8A: while.end:
-; CHECK-V8A-NEXT: [[TMP0:%.*]] = add i32 [[SIZE]], 31
+; CHECK-V8A-NEXT: [[TMP0:%.*]] = add i32 [[SIZE:%.*]], 31
; CHECK-V8A-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SIZE]], i32 31)
; CHECK-V8A-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[UMIN]]
; CHECK-V8A-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 5
; CHECK-V8A-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 5
; CHECK-V8A-NEXT: [[TMP4:%.*]] = sub i32 [[SIZE]], [[TMP3]]
+; CHECK-V8A-NEXT: br label [[WHILE_COND:%.*]]
+; CHECK-V8A: while.cond:
+; CHECK-V8A-NEXT: br i1 false, label [[WHILE_COND]], label [[WHILE_END:%.*]]
+; CHECK-V8A: while.end:
; CHECK-V8A-NEXT: ret i32 [[TMP4]]
;
entry:
diff --git a/llvm/test/Transforms/IndVarSimplify/ARM/indvar-unroll-imm-cost.ll b/llvm/test/Transforms/IndVarSimplify/ARM/indvar-unroll-imm-cost.ll
index 2261423..382f026 100644
--- a/llvm/test/Transforms/IndVarSimplify/ARM/indvar-unroll-imm-cost.ll
+++ b/llvm/test/Transforms/IndVarSimplify/ARM/indvar-unroll-imm-cost.ll
@@ -77,6 +77,8 @@ define dso_local arm_aapcscc void @test(ptr nocapture %pDest, ptr nocapture read
; CHECK-NEXT: [[CMP2780:%.*]] = icmp ugt i32 [[ADD25]], [[J_0_LCSSA]]
; CHECK-NEXT: br i1 [[CMP2780]], label [[FOR_BODY29_PREHEADER:%.*]], label [[FOR_END40]]
; CHECK: for.body29.preheader:
+; CHECK-NEXT: [[TMP10:%.*]] = sub nsw i32 [[ADD25]], [[J_0_LCSSA]]
+; CHECK-NEXT: [[SCEVGEP93:%.*]] = getelementptr i16, ptr [[PSRCB_ADDR_1_LCSSA]], i32 [[TMP10]]
; CHECK-NEXT: br label [[FOR_BODY29:%.*]]
; CHECK: for.body29:
; CHECK-NEXT: [[J_184:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY29]] ], [ [[J_0_LCSSA]], [[FOR_BODY29_PREHEADER]] ]
@@ -100,8 +102,6 @@ define dso_local arm_aapcscc void @test(ptr nocapture %pDest, ptr nocapture read
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[ADD25]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END40_LOOPEXIT:%.*]], label [[FOR_BODY29]]
; CHECK: for.end40.loopexit:
-; CHECK-NEXT: [[TMP10:%.*]] = sub nsw i32 [[ADD25]], [[J_0_LCSSA]]
-; CHECK-NEXT: [[SCEVGEP93:%.*]] = getelementptr i16, ptr [[PSRCB_ADDR_1_LCSSA]], i32 [[TMP10]]
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i16, ptr [[PSRCA_ADDR_1_LCSSA]], i32 [[TMP10]]
; CHECK-NEXT: [[SCEVGEP94:%.*]] = getelementptr i32, ptr [[PDEST_ADDR_1_LCSSA]], i32 [[TMP10]]
; CHECK-NEXT: br label [[FOR_END40]]
diff --git a/llvm/test/Transforms/IndVarSimplify/X86/inner-loop-by-latch-cond.ll b/llvm/test/Transforms/IndVarSimplify/X86/inner-loop-by-latch-cond.ll
index 0fa6e34..0eb9deb 100644
--- a/llvm/test/Transforms/IndVarSimplify/X86/inner-loop-by-latch-cond.ll
+++ b/llvm/test/Transforms/IndVarSimplify/X86/inner-loop-by-latch-cond.ll
@@ -14,6 +14,7 @@ define void @test(i64 %a) {
; CHECK: outer_header:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[OUTER_LATCH:%.*]] ], [ 21, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[I:%.*]] = phi i64 [ 20, [[ENTRY]] ], [ [[I_NEXT:%.*]], [[OUTER_LATCH]] ]
+; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: br label [[INNER_HEADER:%.*]]
; CHECK: inner_header:
; CHECK-NEXT: [[J:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ [[J_NEXT:%.*]], [[INNER_HEADER]] ]
@@ -22,7 +23,6 @@ define void @test(i64 %a) {
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[J_NEXT]], [[INDVARS_IV]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[INNER_HEADER]], label [[OUTER_LATCH]]
; CHECK: outer_latch:
-; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
; CHECK-NEXT: [[COND2:%.*]] = icmp ne i64 [[I_NEXT]], 40
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: br i1 [[COND2]], label [[OUTER_HEADER]], label [[RETURN:%.*]]
diff --git a/llvm/test/Transforms/IndVarSimplify/exit-count-select.ll b/llvm/test/Transforms/IndVarSimplify/exit-count-select.ll
index 1592b84..829092f 100644
--- a/llvm/test/Transforms/IndVarSimplify/exit-count-select.ll
+++ b/llvm/test/Transforms/IndVarSimplify/exit-count-select.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=indvars -S | FileCheck %s
+; RUN: opt < %s -passes='require<scalar-evolution>,indvars,loop-mssa(licm)' -S | FileCheck %s
define i32 @logical_and_2ops(i32 %n, i32 %m) {
; CHECK-LABEL: @logical_and_2ops(
@@ -56,10 +56,10 @@ define i32 @logical_and_3ops(i32 %n, i32 %m, i32 %k) {
; CHECK: loop:
; CHECK-NEXT: br i1 false, label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
-; CHECK-NEXT: [[TMP0:%.*]] = freeze i32 [[K:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[M:%.*]]
-; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP0]], i32 [[TMP1]])
-; CHECK-NEXT: [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[UMIN]], i32 [[N:%.*]])
+; CHECK-NEXT: [[N:%.*]] = freeze i32 [[K:%.*]]
+; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP1]], i32 [[N]])
+; CHECK-NEXT: [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[UMIN]], i32 [[N1:%.*]])
; CHECK-NEXT: ret i32 [[UMIN1]]
;
entry:
@@ -84,10 +84,10 @@ define i32 @logical_or_3ops(i32 %n, i32 %m, i32 %k) {
; CHECK: loop:
; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[TMP0:%.*]] = freeze i32 [[K:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[M:%.*]]
-; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP0]], i32 [[TMP1]])
-; CHECK-NEXT: [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[UMIN]], i32 [[N:%.*]])
+; CHECK-NEXT: [[N:%.*]] = freeze i32 [[K:%.*]]
+; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP1]], i32 [[N]])
+; CHECK-NEXT: [[UMIN1:%.*]] = call i32 @llvm.umin.i32(i32 [[UMIN]], i32 [[N1:%.*]])
; CHECK-NEXT: ret i32 [[UMIN1]]
;
entry:
diff --git a/llvm/test/Transforms/IndVarSimplify/finite-exit-comparisons.ll b/llvm/test/Transforms/IndVarSimplify/finite-exit-comparisons.ll
index e006d9f..f798eb28 100644
--- a/llvm/test/Transforms/IndVarSimplify/finite-exit-comparisons.ll
+++ b/llvm/test/Transforms/IndVarSimplify/finite-exit-comparisons.ll
@@ -932,6 +932,9 @@ for.end: ; preds = %for.body, %entry
define i16 @ult_multiuse_profit(i16 %n.raw, i8 %start) mustprogress {
; CHECK-LABEL: @ult_multiuse_profit(
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[START:%.*]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[TMP2]] to i16
+; CHECK-NEXT: [[UMAX:%.*]] = call i16 @llvm.umax.i16(i16 [[TMP1]], i16 254)
; CHECK-NEXT: [[TMP0:%.*]] = trunc i16 254 to i8
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
@@ -940,9 +943,6 @@ define i16 @ult_multiuse_profit(i16 %n.raw, i8 %start) mustprogress {
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[TMP0]]
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.end:
-; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[START:%.*]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[TMP1]] to i16
-; CHECK-NEXT: [[UMAX:%.*]] = call i16 @llvm.umax.i16(i16 [[TMP2]], i16 254)
; CHECK-NEXT: ret i16 [[UMAX]]
;
entry:
diff --git a/llvm/test/Transforms/IndVarSimplify/pr116483.ll b/llvm/test/Transforms/IndVarSimplify/pr116483.ll
index 093e25a..e9e0d22 100644
--- a/llvm/test/Transforms/IndVarSimplify/pr116483.ll
+++ b/llvm/test/Transforms/IndVarSimplify/pr116483.ll
@@ -4,16 +4,16 @@
define i32 @test() {
; CHECK-LABEL: define i32 @test() {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: br label %[[LOOP_BODY:.*]]
-; CHECK: [[LOOP_BODY]]:
-; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[LOOP_BODY]]
-; CHECK: [[EXIT]]:
; CHECK-NEXT: [[XOR:%.*]] = xor i32 0, 3
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[XOR]], 329
; CHECK-NEXT: [[CONV:%.*]] = trunc i32 [[MUL]] to i16
; CHECK-NEXT: [[SEXT:%.*]] = shl i16 [[CONV]], 8
; CHECK-NEXT: [[CONV1:%.*]] = ashr i16 [[SEXT]], 8
; CHECK-NEXT: [[CONV3:%.*]] = zext i16 [[CONV1]] to i32
+; CHECK-NEXT: br label %[[LOOP_BODY:.*]]
+; CHECK: [[LOOP_BODY]]:
+; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[LOOP_BODY]]
+; CHECK: [[EXIT]]:
; CHECK-NEXT: ret i32 [[CONV3]]
;
entry:
diff --git a/llvm/test/Transforms/IndVarSimplify/pr24783.ll b/llvm/test/Transforms/IndVarSimplify/pr24783.ll
index c521bca..37ecf42 100644
--- a/llvm/test/Transforms/IndVarSimplify/pr24783.ll
+++ b/llvm/test/Transforms/IndVarSimplify/pr24783.ll
@@ -7,11 +7,11 @@ target triple = "powerpc64-unknown-linux-gnu"
define void @f(ptr %end.s, ptr %loc, i32 %p) {
; CHECK-LABEL: @f(
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i32, ptr [[END_S:%.*]], i32 [[P:%.*]]
; CHECK-NEXT: br label [[WHILE_BODY_I:%.*]]
; CHECK: while.body.i:
; CHECK-NEXT: br i1 true, label [[LOOP_EXIT:%.*]], label [[WHILE_BODY_I]]
; CHECK: loop.exit:
-; CHECK-NEXT: [[END:%.*]] = getelementptr inbounds i32, ptr [[END_S:%.*]], i32 [[P:%.*]]
; CHECK-NEXT: store ptr [[END]], ptr [[LOC:%.*]], align 8
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/IndVarSimplify/pr39673.ll b/llvm/test/Transforms/IndVarSimplify/pr39673.ll
index 7b093b3..3cee1ab 100644
--- a/llvm/test/Transforms/IndVarSimplify/pr39673.ll
+++ b/llvm/test/Transforms/IndVarSimplify/pr39673.ll
@@ -148,6 +148,7 @@ loop2.end: ; preds = %loop2
define i16 @neg_loop_carried(i16 %arg) {
; CHECK-LABEL: @neg_loop_carried(
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = add i16 [[ARG:%.*]], 2
; CHECK-NEXT: br label [[LOOP1:%.*]]
; CHECK: loop1:
; CHECK-NEXT: [[L1:%.*]] = phi i16 [ 0, [[ENTRY:%.*]] ], [ [[L1_ADD:%.*]], [[LOOP1]] ]
@@ -155,7 +156,6 @@ define i16 @neg_loop_carried(i16 %arg) {
; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i16 [[L1_ADD]], 2
; CHECK-NEXT: br i1 [[CMP1]], label [[LOOP1]], label [[LOOP2_PREHEADER:%.*]]
; CHECK: loop2.preheader:
-; CHECK-NEXT: [[TMP0:%.*]] = add i16 [[ARG:%.*]], 2
; CHECK-NEXT: br label [[LOOP2:%.*]]
; CHECK: loop2:
; CHECK-NEXT: [[K2:%.*]] = phi i16 [ [[K2_ADD:%.*]], [[LOOP2]] ], [ [[TMP0]], [[LOOP2_PREHEADER]] ]
diff --git a/llvm/test/Transforms/IndVarSimplify/pr63763.ll b/llvm/test/Transforms/IndVarSimplify/pr63763.ll
index 427db1e..a5fde67 100644
--- a/llvm/test/Transforms/IndVarSimplify/pr63763.ll
+++ b/llvm/test/Transforms/IndVarSimplify/pr63763.ll
@@ -16,13 +16,13 @@ define i32 @test(i1 %c) {
; CHECK-NEXT: [[CONV2:%.*]] = ashr exact i32 [[SEXT]], 24
; CHECK-NEXT: [[INVARIANT_OP:%.*]] = sub nsw i32 7, [[CONV2]]
; CHECK-NEXT: call void @use(i32 [[INVARIANT_OP]])
+; CHECK-NEXT: [[SEXT_US:%.*]] = shl i32 [[SEL]], 24
+; CHECK-NEXT: [[CONV2_US:%.*]] = ashr exact i32 [[SEXT_US]], 24
+; CHECK-NEXT: [[INVARIANT_OP_US:%.*]] = sub nsw i32 7, [[CONV2_US]]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[SEXT_US:%.*]] = shl i32 [[SEL]], 24
-; CHECK-NEXT: [[CONV2_US:%.*]] = ashr exact i32 [[SEXT_US]], 24
-; CHECK-NEXT: [[INVARIANT_OP_US:%.*]] = sub nsw i32 7, [[CONV2_US]]
; CHECK-NEXT: ret i32 [[INVARIANT_OP_US]]
;
entry:
diff --git a/llvm/test/Transforms/IndVarSimplify/replace-loop-exit-folds.ll b/llvm/test/Transforms/IndVarSimplify/replace-loop-exit-folds.ll
index b3162de..7cdc98a 100644
--- a/llvm/test/Transforms/IndVarSimplify/replace-loop-exit-folds.ll
+++ b/llvm/test/Transforms/IndVarSimplify/replace-loop-exit-folds.ll
@@ -4,22 +4,21 @@
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
define i32 @remove_loop(i32 %size) {
-; CHECK-LABEL: define i32 @remove_loop(
-; CHECK-SAME: i32 [[SIZE:%.*]]) {
-; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: br label %[[WHILE_COND:.*]]
-; CHECK: [[WHILE_COND]]:
-; CHECK-NEXT: [[SIZE_ADDR_0:%.*]] = phi i32 [ [[SIZE]], %[[ENTRY]] ], [ [[SUB:%.*]], %[[WHILE_COND]] ]
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[SIZE_ADDR_0]], 31
-; CHECK-NEXT: [[SUB]] = add i32 [[SIZE_ADDR_0]], -32
-; CHECK-NEXT: br i1 [[CMP]], label %[[WHILE_COND]], label %[[WHILE_END:.*]]
-; CHECK: [[WHILE_END]]:
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[SIZE]], 31
+; CHECK-LABEL: @remove_loop(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[SIZE:%.*]], 31
; CHECK-NEXT: [[UMIN:%.*]] = call i32 @llvm.umin.i32(i32 [[SIZE]], i32 31)
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[UMIN]]
; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 5
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i32 [[TMP2]], 5
; CHECK-NEXT: [[TMP4:%.*]] = sub i32 [[SIZE]], [[TMP3]]
+; CHECK-NEXT: br label [[WHILE_COND:%.*]]
+; CHECK: while.cond:
+; CHECK-NEXT: [[SIZE_ADDR_0:%.*]] = phi i32 [ [[SIZE]], [[ENTRY:%.*]] ], [ [[SUB:%.*]], [[WHILE_COND]] ]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[SIZE_ADDR_0]], 31
+; CHECK-NEXT: [[SUB]] = add i32 [[SIZE_ADDR_0]], -32
+; CHECK-NEXT: br i1 [[CMP]], label [[WHILE_COND]], label [[WHILE_END:%.*]]
+; CHECK: while.end:
; CHECK-NEXT: ret i32 [[TMP4]]
;
entry:
diff --git a/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-values-phi.ll b/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-values-phi.ll
index 84ae79d..41fce36 100644
--- a/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-values-phi.ll
+++ b/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-values-phi.ll
@@ -76,6 +76,10 @@ define i64 @narow_canonical_iv_wide_multiplied_iv(i32 %x, i64 %y, ptr %0) {
; CHECK-LABEL: @narow_canonical_iv_wide_multiplied_iv(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SMAX:%.*]] = tail call i32 @llvm.smax.i32(i32 [[X:%.*]], i32 1)
+; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[SMAX]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[Y:%.*]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 1
+; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP3]], 1
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
@@ -84,10 +88,6 @@ define i64 @narow_canonical_iv_wide_multiplied_iv(i32 %x, i64 %y, ptr %0) {
; CHECK-NEXT: [[EC:%.*]] = icmp ne i32 [[IV_NEXT]], [[SMAX]]
; CHECK-NEXT: br i1 [[EC]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
-; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[SMAX]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[Y:%.*]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 1
-; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw i64 [[TMP3]], 1
; CHECK-NEXT: ret i64 [[TMP6]]
;
entry:
diff --git a/llvm/test/Transforms/IndVarSimplify/scev-expander-preserve-lcssa.ll b/llvm/test/Transforms/IndVarSimplify/scev-expander-preserve-lcssa.ll
index 14e06fe..aca553e 100644
--- a/llvm/test/Transforms/IndVarSimplify/scev-expander-preserve-lcssa.ll
+++ b/llvm/test/Transforms/IndVarSimplify/scev-expander-preserve-lcssa.ll
@@ -23,8 +23,8 @@ define void @test1(i8 %x, ptr %ptr) {
; CHECK-NEXT: br label [[WHILE_COND192:%.*]]
; CHECK: while.cond192:
; CHECK-NEXT: switch i8 [[X:%.*]], label [[WHILE_BODY205:%.*]] [
-; CHECK-NEXT: i8 59, label [[WHILE_COND215_PREHEADER:%.*]]
-; CHECK-NEXT: i8 10, label [[IF_END224_LOOPEXIT1:%.*]]
+; CHECK-NEXT: i8 59, label [[WHILE_COND215_PREHEADER:%.*]]
+; CHECK-NEXT: i8 10, label [[IF_END224_LOOPEXIT1:%.*]]
; CHECK-NEXT: ]
; CHECK: while.cond215.preheader:
; CHECK-NEXT: br label [[WHILE_COND215:%.*]]
@@ -103,8 +103,8 @@ define void @test2(i16 %x) {
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: switch i16 [[X:%.*]], label [[RETURN_LOOPEXIT1:%.*]] [
-; CHECK-NEXT: i16 41, label [[FOR_END:%.*]]
-; CHECK-NEXT: i16 43, label [[FOR_COND]]
+; CHECK-NEXT: i16 41, label [[FOR_END:%.*]]
+; CHECK-NEXT: i16 43, label [[FOR_COND]]
; CHECK-NEXT: ]
; CHECK: for.end:
; CHECK-NEXT: [[I_0_LCSSA2:%.*]] = phi i32 [ 0, [[FOR_COND]] ]
@@ -336,6 +336,7 @@ if.end1824: ; preds = %for.end1326
define void @test5(ptr %header, i32 %conv, i8 %n) {
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw nsw i32 [[CONV:%.*]], 2
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: br label [[FOR_INNER:%.*]]
@@ -358,7 +359,6 @@ define void @test5(ptr %header, i32 %conv, i8 %n) {
; CHECK-NEXT: br i1 false, label [[FOR_BODY]], label [[WHILE_COND_PREHEADER:%.*]]
; CHECK: while.cond.preheader:
; CHECK-NEXT: [[ADD85_LCSSA:%.*]] = phi i32 [ [[ADD85]], [[FOR_INC]] ]
-; CHECK-NEXT: [[SHL:%.*]] = shl nuw nsw i32 [[CONV:%.*]], 2
; CHECK-NEXT: br label [[WHILE_COND:%.*]]
; CHECK: while.cond:
; CHECK-NEXT: [[POS_8:%.*]] = phi i32 [ [[INC114:%.*]], [[WHILE_BODY:%.*]] ], [ [[ADD85_LCSSA]], [[WHILE_COND_PREHEADER]] ]
@@ -427,8 +427,8 @@ define void @test6(i8 %x) {
; CHECK-NEXT: br label [[WHILE_COND192:%.*]]
; CHECK: while.cond192:
; CHECK-NEXT: switch i8 [[X:%.*]], label [[WHILE_BODY205:%.*]] [
-; CHECK-NEXT: i8 59, label [[WHILE_COND215_PREHEADER:%.*]]
-; CHECK-NEXT: i8 10, label [[IF_END224:%.*]]
+; CHECK-NEXT: i8 59, label [[WHILE_COND215_PREHEADER:%.*]]
+; CHECK-NEXT: i8 10, label [[IF_END224:%.*]]
; CHECK-NEXT: ]
; CHECK: while.cond215.preheader:
; CHECK-NEXT: [[I_7_LCSSA:%.*]] = phi i32 [ 0, [[WHILE_COND192]] ]
diff --git a/llvm/test/Transforms/IndVarSimplify/scev-invalidation.ll b/llvm/test/Transforms/IndVarSimplify/scev-invalidation.ll
index a92d328..ad69812 100644
--- a/llvm/test/Transforms/IndVarSimplify/scev-invalidation.ll
+++ b/llvm/test/Transforms/IndVarSimplify/scev-invalidation.ll
@@ -46,12 +46,12 @@ for.end106: ; preds = %for.cond
define i32 @test_pr58439(i32 %a) {
; CHECK-LABEL: @test_pr58439(
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[A:%.*]], 1
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: br i1 false, label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: [[C_EXT_LCSSA:%.*]] = phi i32 [ 0, [[LOOP]] ]
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[A:%.*]], 1
; CHECK-NEXT: [[RES:%.*]] = add i32 [[C_EXT_LCSSA]], [[OR]]
; CHECK-NEXT: ret i32 [[RES]]
;
@@ -76,6 +76,7 @@ define i8 @l(i32 %inc, i1 %tobool.not.i) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[OUTER_HEADER:%.*]]
; CHECK: outer.header:
+; CHECK-NEXT: [[AND:%.*]] = and i32 1, [[INC:%.*]]
; CHECK-NEXT: br label [[INNER:%.*]]
; CHECK: inner:
; CHECK-NEXT: [[C_05_I:%.*]] = phi i32 [ [[INC_I:%.*]], [[INNER]] ], [ 0, [[OUTER_HEADER]] ]
@@ -86,7 +87,6 @@ define i8 @l(i32 %inc, i1 %tobool.not.i) {
; CHECK: outer.latch:
; CHECK-NEXT: [[C_05_I_LCSSA:%.*]] = phi i32 [ [[C_05_I]], [[INNER]] ]
; CHECK-NEXT: [[LCSSA:%.*]] = phi i32 [ 0, [[INNER]] ]
-; CHECK-NEXT: [[AND:%.*]] = and i32 1, [[INC:%.*]]
; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[AND]] to i8
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[C_05_I_LCSSA]] to i8
; CHECK-NEXT: [[TMP2:%.*]] = sub i8 [[TMP0]], [[TMP1]]
diff --git a/llvm/test/Transforms/IndVarSimplify/sentinel.ll b/llvm/test/Transforms/IndVarSimplify/sentinel.ll
index 5234141..4f12308 100644
--- a/llvm/test/Transforms/IndVarSimplify/sentinel.ll
+++ b/llvm/test/Transforms/IndVarSimplify/sentinel.ll
@@ -9,19 +9,19 @@ define void @test(i1 %arg) personality ptr @snork {
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB4:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add i32 [[INDVARS_IV:%.*]], 1
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[TMP6:%.*]], [[INDVARS_IV]]
-; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[SMAX:%.*]]
; CHECK-NEXT: br i1 [[ARG:%.*]], label [[BB2:%.*]], label [[BB4]]
; CHECK: bb2:
-; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[TMP1]], [[BB1:%.*]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = phi i32 [ [[TMP1:%.*]], [[BB1:%.*]] ]
; CHECK-NEXT: ret void
; CHECK: bb4:
-; CHECK-NEXT: [[INDVARS_IV]] = phi i32 [ [[INDVARS_IV_NEXT]], [[BB1]] ], [ undef, [[BB:%.*]] ]
-; CHECK-NEXT: [[SMAX]] = call i32 @llvm.smax.i32(i32 [[INDVARS_IV]], i32 36)
-; CHECK-NEXT: [[TMP6]] = invoke i32 @quux() [ "deopt"(i32 0, i32 0, i32 0, i32 180, i32 0, i32 25, i32 0, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 3, i32 [[INDVARS_IV]], i32 3, i32 undef, i32 7, ptr null, i32 3, i32 undef, i32 3, i32 undef, i32 3, i32 undef, i32 3, i32 undef, i32 4, double undef, i32 7, ptr null, i32 4, i64 undef, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 3, i32 undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 7, ptr null) ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[BB1]] ], [ undef, [[BB:%.*]] ]
+; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[INDVARS_IV]], i32 36)
+; CHECK-NEXT: [[TMP6:%.*]] = invoke i32 @quux() [ "deopt"(i32 0, i32 0, i32 0, i32 180, i32 0, i32 25, i32 0, i32 7, ptr null, i32 7, ptr null, i32 7, ptr null, i32 3, i32 [[INDVARS_IV]], i32 3, i32 undef, i32 7, ptr null, i32 3, i32 undef, i32 3, i32 undef, i32 3, i32 undef, i32 3, i32 undef, i32 4, double undef, i32 7, ptr null, i32 4, i64 undef, i32 7, ptr null, i32 0, ptr addrspace(1) undef, i32 3, i32 undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 0, ptr addrspace(1) undef, i32 7, ptr null) ]
; CHECK-NEXT: to label [[BB7:%.*]] unwind label [[BB15:%.*]]
; CHECK: bb7:
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i32 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[TMP6]], [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP1]] = sub i32 [[TMP0]], [[SMAX]]
; CHECK-NEXT: br label [[BB9:%.*]]
; CHECK: bb9:
; CHECK-NEXT: br i1 true, label [[BB1]], label [[BB9]]
diff --git a/llvm/test/Transforms/IndVarSimplify/sink-from-preheader.ll b/llvm/test/Transforms/IndVarSimplify/sink-from-preheader.ll
deleted file mode 100644
index 89583f9..0000000
--- a/llvm/test/Transforms/IndVarSimplify/sink-from-preheader.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=indvars -indvars-predicate-loops=0 -S | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin10.0"
-
-; We make sinking here, Changed flag should be set properly.
-define i32 @test(i32 %a, i32 %b, i32 %N) {
-; CHECK-LABEL: @test(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]]
-; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]]
-; CHECK: exit:
-; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: ret i32 [[ADD]]
-;
-entry:
- %add = add i32 %a, %b
- br label %loop
-
-loop:
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
- %iv.next = add i32 %iv, 1
- %cmp = icmp slt i32 %iv.next, %N
- br i1 %cmp, label %loop, label %exit
-
-exit:
- ret i32 %add
-}
diff --git a/llvm/test/Transforms/IndVarSimplify/sink-trapping.ll b/llvm/test/Transforms/IndVarSimplify/sink-trapping.ll
deleted file mode 100644
index d2478be..0000000
--- a/llvm/test/Transforms/IndVarSimplify/sink-trapping.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: opt < %s -passes=indvars -S | FileCheck %s
-
-declare i1 @b()
-
-define i32 @a(i32 %x) nounwind {
-for.body.preheader:
- %y = sdiv i32 10, %x
- br label %for.body
-
-for.body:
- %cmp = call i1 @b()
- br i1 %cmp, label %for.body, label %for.end.loopexit
-
-for.end.loopexit:
- ret i32 %y
-}
-; CHECK: for.end.loopexit:
-; CHECK: sdiv
-; CHECK: ret
diff --git a/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll b/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll
index 17921af..abe7a3e 100644
--- a/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll
+++ b/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll
@@ -24,13 +24,13 @@ define void @_Z3fn1v() {
; CHECK-NEXT: [[X8:%.*]] = icmp ult i32 0, 4
; CHECK-NEXT: br i1 [[X8]], label [[DOTPREHEADER_LR_PH:%.*]], label [[X22]]
; CHECK: .preheader.lr.ph:
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[K_09]], i64 [[TMP5]]
; CHECK-NEXT: br label [[DOTPREHEADER:%.*]]
; CHECK: .preheader:
; CHECK-NEXT: br label [[X17:%.*]]
; CHECK: x17:
; CHECK-NEXT: br i1 false, label [[DOTPREHEADER]], label [[DOT_CRIT_EDGE_8:%.*]]
; CHECK: ._crit_edge.8:
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[K_09]], i64 [[TMP5]]
; CHECK-NEXT: br label [[X22]]
; CHECK: x22:
; CHECK-NEXT: [[K_1_LCSSA:%.*]] = phi ptr [ [[SCEVGEP]], [[DOT_CRIT_EDGE_8]] ], [ [[K_09]], [[DOTPREHEADER4]] ]
diff --git a/llvm/test/Transforms/LICM/scalar-promote.ll b/llvm/test/Transforms/LICM/scalar-promote.ll
index 3af65df..e6cc457 100644
--- a/llvm/test/Transforms/LICM/scalar-promote.ll
+++ b/llvm/test/Transforms/LICM/scalar-promote.ll
@@ -43,9 +43,9 @@ define void @test2(i32 %i) {
; CHECK-LABEL: define void @test2(
; CHECK-SAME: i32 [[I:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[X1:%.*]] = getelementptr i32, ptr @X, i64 1
; CHECK-NEXT: [[X2:%.*]] = getelementptr i32, ptr @X, i64 1
-; CHECK-NEXT: [[X1_PROMOTED:%.*]] = load i32, ptr [[X1]], align 4
+; CHECK-NEXT: [[X3:%.*]] = getelementptr i32, ptr @X, i64 1
+; CHECK-NEXT: [[X1_PROMOTED:%.*]] = load i32, ptr [[X2]], align 4
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[A1:%.*]] = phi i32 [ [[V:%.*]], %[[LOOP]] ], [ [[X1_PROMOTED]], %[[ENTRY]] ]
@@ -53,7 +53,7 @@ define void @test2(i32 %i) {
; CHECK-NEXT: br i1 false, label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[V_LCSSA:%.*]] = phi i32 [ [[V]], %[[LOOP]] ]
-; CHECK-NEXT: store i32 [[V_LCSSA]], ptr [[X1]], align 4
+; CHECK-NEXT: store i32 [[V_LCSSA]], ptr [[X2]], align 4
; CHECK-NEXT: ret void
;
Entry:
diff --git a/llvm/test/Transforms/IndVarSimplify/sink-alloca.ll b/llvm/test/Transforms/LICM/sink-alloca.ll
index 0997bf6..2bf9350 100644
--- a/llvm/test/Transforms/IndVarSimplify/sink-alloca.ll
+++ b/llvm/test/Transforms/LICM/sink-alloca.ll
@@ -1,9 +1,9 @@
-; RUN: opt < %s -passes=indvars -S | FileCheck %s
+; RUN: opt < %s -passes=licm -verify-memoryssa -S | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin10.0"
; PR4775
-; Indvars shouldn't sink the alloca out of the entry block, even though
+; LICM shouldn't sink the alloca out of the entry block, even though
; it's not used until after the loop.
define i32 @main() nounwind {
; CHECK: entry:
@@ -25,7 +25,7 @@ while.end: ; preds = %while.cond
declare i32 @bar()
; <rdar://problem/10352360>
-; Indvars shouldn't sink the first alloca between the stacksave and stackrestore
+; LICM shouldn't sink the first alloca between the stacksave and stackrestore
; intrinsics.
declare ptr @a(...)
declare ptr @llvm.stacksave() nounwind
diff --git a/llvm/test/Transforms/LICM/sink-from-preheader.ll b/llvm/test/Transforms/LICM/sink-from-preheader.ll
new file mode 100644
index 0000000..bbe3d3b
--- /dev/null
+++ b/llvm/test/Transforms/LICM/sink-from-preheader.ll
@@ -0,0 +1,185 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=licm -verify-memoryssa -S | FileCheck %s
+
+; We perform sinking here; the Changed flag should be set properly.
+define i32 @test(i32 %a, i32 %b, i32 %N) {
+; CHECK-LABEL: @test(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+entry:
+ %add = add i32 %a, %b
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i32 %iv, 1
+ %cmp = icmp slt i32 %iv.next, %N
+ br i1 %cmp, label %loop, label %exit
+
+exit:
+ ret i32 %add
+}
+
+define i32 @test_with_unused_load(i32 %a, ptr %b, i32 %N) {
+; CHECK-LABEL: @test_with_unused_load(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[LOAD]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+entry:
+ %load = load i32, ptr %b
+ %add = add i32 %a, %load
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i32 %iv, 1
+ %cmp = icmp slt i32 %iv.next, %N
+ br i1 %cmp, label %loop, label %exit
+
+exit:
+ ret i32 %add
+}
+
+define i32 @test_with_unused_load_modified_store(i32 %a, ptr %b, i32 %N) {
+; CHECK-LABEL: @test_with_unused_load_modified_store(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[B:%.*]], align 4
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[A:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: [[SMAX:%.*]] = phi i32 [ [[IV_NEXT]], [[LOOP]] ]
+; CHECK-NEXT: store i32 [[SMAX]], ptr [[B]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A]], [[LOAD]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+entry:
+ %load = load i32, ptr %b
+ %add = add i32 %a, %load
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i32 %iv, %a
+ store i32 %iv.next, ptr %b
+ %cmp = icmp slt i32 %iv.next, %N
+ br i1 %cmp, label %loop, label %exit
+
+exit:
+ ret i32 %add
+}
+
+; Volatile loads must not be sunk.
+define i32 @test_with_volatile_load_no_sink(i32 %a, ptr %b, i32 %N) {
+; CHECK-LABEL: @test_with_volatile_load_no_sink(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LD:%.*]] = load volatile i32, ptr [[B:%.*]], align 4
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[LD]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+entry:
+ %ld = load volatile i32, ptr %b, align 4
+ %add = add i32 %a, %ld
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i32 %iv, 1
+ %cmp = icmp slt i32 %iv.next, %N
+ br i1 %cmp, label %loop, label %exit
+
+exit:
+ ret i32 %add
+}
+
+; Ordered/atomic loads must not be sunk.
+define i32 @test_with_atomic_load_no_sink(i32 %a, ptr %b, i32 %N) {
+; CHECK-LABEL: @test_with_atomic_load_no_sink(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LD:%.*]] = load atomic i32, ptr [[B:%.*]] acquire, align 4
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[LD]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+entry:
+ %ld = load atomic i32, ptr %b acquire, align 4
+ %add = add i32 %a, %ld
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i32 %iv, 1
+ %cmp = icmp slt i32 %iv.next, %N
+ br i1 %cmp, label %loop, label %exit
+
+exit:
+ ret i32 %add
+}
+
+declare void @clobber(ptr)
+
+; Calls that may write memory in the loop should prevent sinking the load.
+define i32 @test_with_unused_load_clobbered_by_call(i32 %a, ptr %b, i32 %N) {
+; CHECK-LABEL: @test_with_unused_load_clobbered_by_call(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LD:%.*]] = load i32, ptr [[B:%.*]], align 4
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: call void @clobber(ptr [[B]])
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[LD]]
+; CHECK-NEXT: ret i32 [[ADD]]
+;
+entry:
+ %ld = load i32, ptr %b, align 4
+ %add = add i32 %a, %ld
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i32 %iv, 1
+ call void @clobber(ptr %b)
+ %cmp = icmp slt i32 %iv.next, %N
+ br i1 %cmp, label %loop, label %exit
+
+exit:
+ ret i32 %add
+}
diff --git a/llvm/test/Transforms/LICM/sink-trapping.ll b/llvm/test/Transforms/LICM/sink-trapping.ll
new file mode 100644
index 0000000..f4d260d
--- /dev/null
+++ b/llvm/test/Transforms/LICM/sink-trapping.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt < %s -passes=licm -verify-memoryssa -S | FileCheck %s
+
+declare i1 @b()
+
+define i32 @a(i32 %x) nounwind {
+; CHECK-LABEL: define i32 @a(
+; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[FOR_BODY_PREHEADER:.*:]]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[CMP:%.*]] = call i1 @b()
+; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[FOR_END_LOOPEXIT:.*]]
+; CHECK: [[FOR_END_LOOPEXIT]]:
+; CHECK-NEXT: [[Y:%.*]] = sdiv i32 10, [[X]]
+; CHECK-NEXT: ret i32 [[Y]]
+;
+for.body.preheader:
+ %y = sdiv i32 10, %x
+ br label %for.body
+
+for.body:
+ %cmp = call i1 @b()
+ br i1 %cmp, label %for.body, label %for.end.loopexit
+
+for.end.loopexit:
+ ret i32 %y
+}
diff --git a/llvm/test/Transforms/LoopDeletion/invalidate-scev-after-hoisting.ll b/llvm/test/Transforms/LoopDeletion/invalidate-scev-after-hoisting.ll
index bdd51c2..6c19aaa 100644
--- a/llvm/test/Transforms/LoopDeletion/invalidate-scev-after-hoisting.ll
+++ b/llvm/test/Transforms/LoopDeletion/invalidate-scev-after-hoisting.ll
@@ -84,13 +84,13 @@ define i32 @scev_invalidation_after_deleting(ptr %src) {
; CHECK: inner.2.preheader:
; CHECK-NEXT: br label [[INNER_3_PH:%.*]]
; CHECK: inner.3.ph:
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 0 to i32
; CHECK-NEXT: br label [[INNER_3:%.*]]
; CHECK: inner.3:
; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[SRC:%.*]], align 4
; CHECK-NEXT: br i1 false, label [[OUTER_LATCH]], label [[INNER_3]]
; CHECK: outer.latch:
; CHECK-NEXT: [[L_LCSSA:%.*]] = phi i32 [ [[L]], [[INNER_3]] ]
-; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 0 to i32
; CHECK-NEXT: [[OUTER_IV_NEXT]] = add nsw i32 [[L_LCSSA]], [[TRUNC]]
; CHECK-NEXT: br label [[OUTER_HEADER]]
;
diff --git a/llvm/test/Transforms/LoopDistribute/laa-invalidation.ll b/llvm/test/Transforms/LoopDistribute/laa-invalidation.ll
index 62c5627..4a55c0e 100644
--- a/llvm/test/Transforms/LoopDistribute/laa-invalidation.ll
+++ b/llvm/test/Transforms/LoopDistribute/laa-invalidation.ll
@@ -4,11 +4,11 @@
define void @test_pr50940(ptr %A, ptr %B) {
; CHECK-LABEL: @test_pr50940(
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
; CHECK-NEXT: br label [[OUTER_HEADER:%.*]]
; CHECK: outer.header:
; CHECK-NEXT: br i1 false, label [[OUTER_LATCH:%.*]], label [[INNER_PH:%.*]]
; CHECK: inner.ph:
-; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
; CHECK-NEXT: [[GEP_A_3:%.*]] = getelementptr inbounds i16, ptr [[A]], i64 3
; CHECK-NEXT: br label [[INNER_LVER_CHECK:%.*]]
; CHECK: inner.lver.check:
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/prefer-all.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/prefer-all.ll
index db30fd2..1944a9c 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/prefer-all.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/prefer-all.ll
@@ -119,8 +119,6 @@ for.end:
; We can't use postindex addressing on the conditional load of qval and can't
; convert the loop condition to a compare with zero, so we should instead use
; offset addressing.
-; FIXME: Currently we don't notice the load of qval is conditional, and attempt
-; postindex addressing anyway.
define i32 @conditional_load(ptr %p, ptr %q, ptr %n) {
; CHECK-LABEL: define i32 @conditional_load(
; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], ptr [[N:%.*]]) {
@@ -128,7 +126,6 @@ define i32 @conditional_load(ptr %p, ptr %q, ptr %n) {
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP2:%.*]], %[[FOR_INC:.*]] ], [ [[P]], %[[ENTRY]] ]
-; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[FOR_INC]] ], [ [[Q]], %[[ENTRY]] ]
; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ [[IDX_NEXT:%.*]], %[[FOR_INC]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[RET_NEXT:%.*]], %[[FOR_INC]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[PVAL:%.*]] = load i32, ptr [[LSR_IV1]], align 4
@@ -136,6 +133,8 @@ define i32 @conditional_load(ptr %p, ptr %q, ptr %n) {
; CHECK-NEXT: [[SCEVGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label %[[FOR_INC]], label %[[IF_THEN:.*]]
; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[IDX]], 2
+; CHECK-NEXT: [[LSR_IV:%.*]] = getelementptr i8, ptr [[Q]], i64 [[TMP0]]
; CHECK-NEXT: [[QVAL:%.*]] = load i32, ptr [[LSR_IV]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[RET]], [[QVAL]]
; CHECK-NEXT: br label %[[FOR_INC]]
@@ -143,7 +142,6 @@ define i32 @conditional_load(ptr %p, ptr %q, ptr %n) {
; CHECK-NEXT: [[RET_NEXT]] = phi i32 [ [[ADD]], %[[IF_THEN]] ], [ [[RET]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[IDX_NEXT]] = add nuw nsw i64 [[IDX]], 1
; CHECK-NEXT: [[NVAL:%.*]] = load volatile i64, ptr [[N]], align 8
-; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 4
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IDX_NEXT]], [[NVAL]]
; CHECK-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
@@ -176,3 +174,141 @@ for.inc:
exit:
ret i32 %ret.next
}
+
+; We can use postindex addressing for both loads here, even though the second
+; may not be executed on every loop iteration.
+define i32 @early_exit_load(ptr %p, ptr %q, ptr %n) {
+; CHECK-LABEL: define i32 @early_exit_load(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], ptr [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP2:%.*]], %[[FOR_INC:.*]] ], [ [[P]], %[[ENTRY]] ]
+; CHECK-NEXT: [[LSR_IV:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[FOR_INC]] ], [ [[Q]], %[[ENTRY]] ]
+; CHECK-NEXT: [[RET_PHI:%.*]] = phi i32 [ [[ADD:%.*]], %[[FOR_INC]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ [[IDX_NEXT:%.*]], %[[FOR_INC]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[PVAL:%.*]] = load i32, ptr [[LSR_IV1]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[PVAL]], 0
+; CHECK-NEXT: [[SCEVGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
+; CHECK-NEXT: br i1 [[CMP1]], label %[[FOR_INC]], label %[[EXIT:.*]]
+; CHECK: [[FOR_INC]]:
+; CHECK-NEXT: [[QVAL:%.*]] = load i32, ptr [[LSR_IV]], align 4
+; CHECK-NEXT: [[ADD]] = add nsw i32 [[QVAL]], [[RET_PHI]]
+; CHECK-NEXT: [[IDX_NEXT]] = add nuw nsw i64 [[IDX]], 1
+; CHECK-NEXT: [[NVAL:%.*]] = load volatile i64, ptr [[N]], align 8
+; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV]], i64 4
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i64 [[IDX_NEXT]], [[NVAL]]
+; CHECK-NEXT: br i1 [[CMP2]], label %[[FOR_BODY]], label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[RET_PHI]], %[[FOR_BODY]] ], [ [[ADD]], %[[FOR_INC]] ]
+; CHECK-NEXT: ret i32 [[RET]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %ret.phi = phi i32 [ %add, %for.inc ], [ 0, %entry ]
+ %idx = phi i64 [ %idx.next, %for.inc ], [ 0, %entry ]
+ %paddr = getelementptr inbounds nuw i32, ptr %p, i64 %idx
+ %pval = load i32, ptr %paddr, align 4
+ %cmp1 = icmp eq i32 %pval, 0
+ br i1 %cmp1, label %for.inc, label %exit
+
+for.inc:
+ %qaddr = getelementptr inbounds nuw i32, ptr %q, i64 %idx
+ %qval = load i32, ptr %qaddr, align 4
+ %add = add nsw i32 %qval, %ret.phi
+ %idx.next = add nuw nsw i64 %idx, 1
+ %nval = load volatile i64, ptr %n, align 8
+ %cmp2 = icmp slt i64 %idx.next, %nval
+ br i1 %cmp2, label %for.body, label %exit
+
+exit:
+ %ret = phi i32 [ %ret.phi, %for.body ], [ %add, %for.inc ]
+ ret i32 %ret
+}
+
+; The control-flow before and after the load of qval shouldn't prevent postindex
+; addressing from happening.
+; FIXME: We choose postindex addressing, but the scevgep is placed in for.inc so
+; during codegen we will fail to actually generate a postindex load.
+define void @middle_block_load(ptr %p, ptr %q, i64 %n) {
+; CHECK-LABEL: define void @middle_block_load(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i64 [[N:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[LSR_IV2:%.*]] = phi ptr [ [[SCEVGEP3:%.*]], %[[FOR_INC:.*]] ], [ [[P]], %[[ENTRY]] ]
+; CHECK-NEXT: [[LSR_IV1:%.*]] = phi ptr [ [[SCEVGEP:%.*]], %[[FOR_INC]] ], [ [[Q]], %[[ENTRY]] ]
+; CHECK-NEXT: [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], %[[FOR_INC]] ], [ [[N]], %[[ENTRY]] ]
+; CHECK-NEXT: [[PVAL:%.*]] = load i32, ptr [[LSR_IV2]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[PVAL]], 0
+; CHECK-NEXT: [[SCEVGEP3]] = getelementptr i8, ptr [[LSR_IV2]], i64 4
+; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN1:.*]], label %[[IF_ELSE1:.*]]
+; CHECK: [[IF_THEN1]]:
+; CHECK-NEXT: tail call void @otherfn1()
+; CHECK-NEXT: br label %[[IF_END:.*]]
+; CHECK: [[IF_ELSE1]]:
+; CHECK-NEXT: tail call void @otherfn2()
+; CHECK-NEXT: br label %[[IF_END]]
+; CHECK: [[IF_END]]:
+; CHECK-NEXT: [[QVAL:%.*]] = load i32, ptr [[LSR_IV1]], align 4
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[QVAL]], 0
+; CHECK-NEXT: br i1 [[CMP2]], label %[[IF_THEN2:.*]], label %[[IF_ELSE2:.*]]
+; CHECK: [[IF_THEN2]]:
+; CHECK-NEXT: tail call void @otherfn1()
+; CHECK-NEXT: br label %[[FOR_INC]]
+; CHECK: [[IF_ELSE2]]:
+; CHECK-NEXT: tail call void @otherfn2()
+; CHECK-NEXT: br label %[[FOR_INC]]
+; CHECK: [[FOR_INC]]:
+; CHECK-NEXT: [[LSR_IV_NEXT]] = add i64 [[LSR_IV]], -1
+; CHECK-NEXT: [[SCEVGEP]] = getelementptr i8, ptr [[LSR_IV1]], i64 4
+; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i64 [[LSR_IV_NEXT]], 0
+; CHECK-NEXT: br i1 [[CMP3]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %idx = phi i64 [ %idx.next, %for.inc ], [ 0, %entry ]
+ %paddr = getelementptr inbounds nuw i32, ptr %p, i64 %idx
+ %pval = load i32, ptr %paddr, align 4
+ %cmp1 = icmp sgt i32 %pval, 0
+ br i1 %cmp1, label %if.then1, label %if.else1
+
+if.then1:
+ tail call void @otherfn1()
+ br label %if.end
+
+if.else1:
+ tail call void @otherfn2()
+ br label %if.end
+
+if.end:
+ %qaddr = getelementptr inbounds nuw i32, ptr %q, i64 %idx
+ %qval = load i32, ptr %qaddr, align 4
+ %cmp2 = icmp sgt i32 %qval, 0
+ br i1 %cmp2, label %if.then2, label %if.else2
+
+if.then2:
+ tail call void @otherfn1()
+ br label %for.inc
+
+if.else2:
+ tail call void @otherfn2()
+ br label %for.inc
+
+for.inc:
+ %idx.next = add nuw nsw i64 %idx, 1
+ %cmp3 = icmp eq i64 %idx.next, %n
+ br i1 %cmp3, label %exit, label %for.body
+
+exit:
+ ret void
+}
+
+declare dso_local void @otherfn1()
+declare dso_local void @otherfn2()
diff --git a/llvm/test/Transforms/LoopUnroll/followup.ll b/llvm/test/Transforms/LoopUnroll/followup.ll
index 051e43d..9dda76e 100644
--- a/llvm/test/Transforms/LoopUnroll/followup.ll
+++ b/llvm/test/Transforms/LoopUnroll/followup.ll
@@ -1,9 +1,20 @@
-; RUN: opt < %s -S -passes=loop-unroll -unroll-count=2 | FileCheck %s -check-prefixes=COUNT,COMMON
-; RUN: opt < %s -S -passes=loop-unroll -unroll-runtime=true -unroll-runtime-epilog=true | FileCheck %s -check-prefixes=EPILOG,COMMON
-; RUN: opt < %s -S -passes=loop-unroll -unroll-runtime=true -unroll-runtime-epilog=false | FileCheck %s -check-prefixes=PROLOG,COMMON
-;
-; Check that followup-attributes are applied after LoopUnroll.
+; Check that followup attributes are applied after LoopUnroll.
;
+; We choose -unroll-count=3 because it produces partial unrolling of remainder
+; loops. Complete unrolling would leave no remainder loop to which to copy
+; followup attributes.
+
+; DEFINE: %{unroll} = opt < %s -S -passes=loop-unroll -unroll-count=3
+; DEFINE: %{epilog} = %{unroll} -unroll-runtime -unroll-runtime-epilog=true
+; DEFINE: %{prolog} = %{unroll} -unroll-runtime -unroll-runtime-epilog=false
+; DEFINE: %{fc} = FileCheck %s -check-prefixes
+
+; RUN: %{unroll} | %{fc} COMMON,COUNT
+; RUN: %{epilog} | %{fc} COMMON,EPILOG,EPILOG-NO-UNROLL
+; RUN: %{prolog} | %{fc} COMMON,PROLOG,PROLOG-NO-UNROLL
+; RUN: %{epilog} -unroll-remainder | %{fc} COMMON,EPILOG,EPILOG-UNROLL
+; RUN: %{prolog} -unroll-remainder | %{fc} COMMON,PROLOG,PROLOG-UNROLL
+
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
define i32 @test(ptr nocapture %a, i32 %n) nounwind uwtable readonly {
@@ -36,15 +47,17 @@ for.end: ; preds = %for.body, %entry
; COMMON-LABEL: @test(
-; COUNT: br i1 %exitcond.1, label %for.end.loopexit, label %for.body, !llvm.loop ![[LOOP:[0-9]+]]
+; COUNT: br i1 %exitcond.2, label %for.end.loopexit, label %for.body, !llvm.loop ![[LOOP:[0-9]+]]
; COUNT: ![[FOLLOWUP_ALL:[0-9]+]] = !{!"FollowupAll"}
; COUNT: ![[FOLLOWUP_UNROLLED:[0-9]+]] = !{!"FollowupUnrolled"}
; COUNT: ![[LOOP]] = distinct !{![[LOOP]], ![[FOLLOWUP_ALL]], ![[FOLLOWUP_UNROLLED]]}
-; EPILOG: br i1 %niter.ncmp.7, label %for.end.loopexit.unr-lcssa, label %for.body, !llvm.loop ![[LOOP_0:[0-9]+]]
-; EPILOG: br i1 %epil.iter.cmp, label %for.body.epil, label %for.end.loopexit.epilog-lcssa, !llvm.loop ![[LOOP_2:[0-9]+]]
+; EPILOG: br i1 %niter.ncmp.2, label %for.end.loopexit.unr-lcssa, label %for.body, !llvm.loop ![[LOOP_0:[0-9]+]]
+; EPILOG-NO-UNROLL: br i1 %epil.iter.cmp, label %for.body.epil, label %for.end.loopexit.epilog-lcssa, !llvm.loop ![[LOOP_2:[0-9]+]]
+; EPILOG-UNROLL: br i1 %epil.iter.cmp, label %for.body.epil.1, label %for.end.loopexit.epilog-lcssa
+; EPILOG-UNROLL: br i1 %epil.iter.cmp.1, label %for.body.epil, label %for.end.loopexit.epilog-lcssa, !llvm.loop ![[LOOP_2:[0-9]+]]
; EPILOG: ![[LOOP_0]] = distinct !{![[LOOP_0]], ![[FOLLOWUP_ALL:[0-9]+]], ![[FOLLOWUP_UNROLLED:[0-9]+]]}
; EPILOG: ![[FOLLOWUP_ALL]] = !{!"FollowupAll"}
@@ -53,8 +66,10 @@ for.end: ; preds = %for.body, %entry
; EPILOG: ![[FOLLOWUP_REMAINDER]] = !{!"FollowupRemainder"}
-; PROLOG: br i1 %prol.iter.cmp, label %for.body.prol, label %for.body.prol.loopexit.unr-lcssa, !llvm.loop ![[LOOP_0:[0-9]+]]
-; PROLOG: br i1 %exitcond.7, label %for.end.loopexit.unr-lcssa, label %for.body, !llvm.loop ![[LOOP_2:[0-9]+]]
+; PROLOG-UNROLL: br i1 %prol.iter.cmp, label %for.body.prol.1, label %for.body.prol.loopexit.unr-lcssa
+; PROLOG-UNROLL: br i1 %prol.iter.cmp.1, label %for.body.prol, label %for.body.prol.loopexit.unr-lcssa, !llvm.loop ![[LOOP_0:[0-9]+]]
+; PROLOG-NO-UNROLL: br i1 %prol.iter.cmp, label %for.body.prol, label %for.body.prol.loopexit.unr-lcssa, !llvm.loop ![[LOOP_0:[0-9]+]]
+; PROLOG: br i1 %exitcond.2, label %for.end.loopexit.unr-lcssa, label %for.body, !llvm.loop ![[LOOP_2:[0-9]+]]
; PROLOG: ![[LOOP_0]] = distinct !{![[LOOP_0]], ![[FOLLOWUP_ALL:[0-9]+]], ![[FOLLOWUP_REMAINDER:[0-9]+]]}
; PROLOG: ![[FOLLOWUP_ALL]] = !{!"FollowupAll"}
diff --git a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll
index eea2237..abed18a 100644
--- a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll
+++ b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll
@@ -380,7 +380,6 @@ define void @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP8]], 8589934588
-; CHECK-NEXT: [[IND_END:%.*]] = add nuw nsw i64 [[N_VEC]], [[TMP4]]
; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i32> <i32 poison, i32 0, i32 0, i32 0>, i32 [[ARRAYIDX5_PROMOTED]], i64 0
; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i32, ptr [[VAR2]], i64 [[TMP4]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
@@ -396,6 +395,7 @@ define void @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi <4 x i32> [ [[TMP17]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[IND_END:%.*]] = add nuw nsw i64 [[N_VEC]], [[TMP4]]
; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[DOTLCSSA]])
; CHECK-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX5]], align 4, !alias.scope [[META27:![0-9]+]], !noalias [[META23]]
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP8]], [[N_VEC]]
diff --git a/llvm/test/Transforms/MemCpyOpt/stack-move.ll b/llvm/test/Transforms/MemCpyOpt/stack-move.ll
index 940e30e..0c2e05f 100644
--- a/llvm/test/Transforms/MemCpyOpt/stack-move.ll
+++ b/llvm/test/Transforms/MemCpyOpt/stack-move.ll
@@ -1729,3 +1729,61 @@ define i32 @test_ret_only_capture() {
%v = load i32, ptr %a
ret i32 %v
}
+
+declare ptr @captures_address_only(ptr captures(address))
+
+; Can transform: Only one address captured.
+define void @test_captures_address_captures_none() {
+; CHECK-LABEL: define void @test_captures_address_captures_none() {
+; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
+; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
+; CHECK-NEXT: call void @captures_address_only(ptr [[SRC]])
+; CHECK-NEXT: call void @use_nocapture(ptr [[SRC]])
+; CHECK-NEXT: ret void
+;
+ %src = alloca %struct.Foo, align 4
+ %dst = alloca %struct.Foo, align 4
+ store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
+ call void @captures_address_only(ptr %src)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %src, i64 12, i1 false)
+ call void @use_nocapture(ptr %dst)
+ ret void
+}
+
+; Can transform: Only one address captured.
+define void @test_captures_none_and_captures_address() {
+; CHECK-LABEL: define void @test_captures_none_and_captures_address() {
+; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
+; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
+; CHECK-NEXT: call void @use_nocapture(ptr [[SRC]])
+; CHECK-NEXT: call void @captures_address_only(ptr [[SRC]])
+; CHECK-NEXT: ret void
+;
+ %src = alloca %struct.Foo, align 4
+ %dst = alloca %struct.Foo, align 4
+ store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
+ call void @use_nocapture(ptr %src)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %src, i64 12, i1 false)
+ call void @captures_address_only(ptr %dst)
+ ret void
+}
+
+; Cannot transform: Both addresses captured.
+define void @test_captures_address_and_captures_address() {
+; CHECK-LABEL: define void @test_captures_address_and_captures_address() {
+; CHECK-NEXT: [[SRC:%.*]] = alloca [[STRUCT_FOO:%.*]], align 4
+; CHECK-NEXT: [[DST:%.*]] = alloca [[STRUCT_FOO]], align 4
+; CHECK-NEXT: store [[STRUCT_FOO]] { i32 10, i32 20, i32 30 }, ptr [[SRC]], align 4
+; CHECK-NEXT: call void @captures_address_only(ptr [[SRC]])
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[DST]], ptr align 4 [[SRC]], i64 12, i1 false)
+; CHECK-NEXT: call void @captures_address_only(ptr [[DST]])
+; CHECK-NEXT: ret void
+;
+ %src = alloca %struct.Foo, align 4
+ %dst = alloca %struct.Foo, align 4
+ store %struct.Foo { i32 10, i32 20, i32 30 }, ptr %src
+ call void @captures_address_only(ptr %src)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %src, i64 12, i1 false)
+ call void @captures_address_only(ptr %dst)
+ ret void
+}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll
index 8d20a3b..d311f54 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll
@@ -43,7 +43,6 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY_PREHEADER13]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP8]], -8
-; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[N_VEC]], [[TMP0]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -64,6 +63,7 @@ define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef
; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
+; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[N_VEC]], [[TMP0]]
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP8]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END]], label [[FOR_BODY_PREHEADER13]]
; CHECK: for.body.preheader14:
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll
index 2dceb27..f2ae327 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll
@@ -1040,7 +1040,6 @@ define void @saxpy_5(i64 %n, float %a, ptr readonly %x, ptr noalias %y) {
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[LOOP_PREHEADER11:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP3]], 9223372036854775806
-; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[N_VEC]], 5
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[A]], i64 0
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT]], <2 x float> poison, <10 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -1058,10 +1057,11 @@ define void @saxpy_5(i64 %n, float %a, ptr readonly %x, ptr noalias %y) {
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[N_VEC]], 5
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT]], label %[[LOOP_PREHEADER11]]
; CHECK: [[LOOP_PREHEADER11]]:
-; CHECK-NEXT: [[I1_PH:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ [[TMP4]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[I1_PH:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ], [ [[TMP16]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x float> [[TMP10]], <4 x float> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label %[[LOOP:.*]]
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll
index a3b8736..338d925 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll
@@ -9,7 +9,6 @@ define i64 @std_find_i16_constant_offset_with_assumptions(ptr %first.coerce, i16
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[FIRST_COERCE]], i64 2) ]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[FIRST_COERCE]], i64 256) ]
-; CHECK-NEXT: [[COERCE_VAL_IP:%.*]] = getelementptr i8, ptr [[FIRST_COERCE]], i64 256
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[S]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -27,6 +26,7 @@ define i64 @std_find_i16_constant_offset_with_assumptions(ptr %first.coerce, i16
; CHECK-NEXT: [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]]
; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_SPLIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_SPLIT]]:
+; CHECK-NEXT: [[COERCE_VAL_IP:%.*]] = getelementptr i8, ptr [[FIRST_COERCE]], i64 256
; CHECK-NEXT: br i1 [[TMP2]], label %[[VECTOR_EARLY_EXIT:.*]], label %[[RETURN:.*]]
; CHECK: [[VECTOR_EARLY_EXIT]]:
; CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.experimental.cttz.elts.i64.v8i1(<8 x i1> [[TMP0]], i1 true)
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
index 5127b7d..7c349fb 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
@@ -18,22 +18,15 @@ define void @arm_mult_q15(ptr %pSrcA, ptr %pSrcB, ptr noalias %pDst, i32 %blockS
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[WHILE_BODY_PREHEADER15:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i32 [[BLOCKSIZE]], -8
-; CHECK-NEXT: [[IND_END:%.*]] = and i32 [[BLOCKSIZE]], 7
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[N_VEC]], 1
-; CHECK-NEXT: [[IND_END7:%.*]] = getelementptr i8, ptr [[PSRCA:%.*]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[N_VEC]], 1
-; CHECK-NEXT: [[IND_END9:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i32 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[N_VEC]], 1
-; CHECK-NEXT: [[IND_END11:%.*]] = getelementptr i8, ptr [[PSRCB:%.*]], i32 [[TMP2]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRCA]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRCA:%.*]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[OFFSET_IDX13:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP14:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[OFFSET_IDX13]]
+; CHECK-NEXT: [[NEXT_GEP14:%.*]] = getelementptr i8, ptr [[PDST:%.*]], i32 [[OFFSET_IDX13]]
; CHECK-NEXT: [[OFFSET_IDX15:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP16:%.*]] = getelementptr i8, ptr [[PSRCB]], i32 [[OFFSET_IDX15]]
+; CHECK-NEXT: [[NEXT_GEP16:%.*]] = getelementptr i8, ptr [[PSRCB:%.*]], i32 [[OFFSET_IDX15]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
; CHECK-NEXT: [[WIDE_LOAD17:%.*]] = load <8 x i16>, ptr [[NEXT_GEP16]], align 2
@@ -47,6 +40,13 @@ define void @arm_mult_q15(ptr %pSrcA, ptr %pSrcB, ptr noalias %pDst, i32 %blockS
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
+; CHECK-NEXT: [[IND_END:%.*]] = and i32 [[BLOCKSIZE]], 7
+; CHECK-NEXT: [[TMP13:%.*]] = shl i32 [[N_VEC]], 1
+; CHECK-NEXT: [[IND_END7:%.*]] = getelementptr i8, ptr [[PSRCA]], i32 [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = shl i32 [[N_VEC]], 1
+; CHECK-NEXT: [[IND_END9:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[TMP14]]
+; CHECK-NEXT: [[TMP12:%.*]] = shl i32 [[N_VEC]], 1
+; CHECK-NEXT: [[IND_END11:%.*]] = getelementptr i8, ptr [[PSRCB]], i32 [[TMP12]]
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[BLOCKSIZE]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[WHILE_BODY_PREHEADER15]]
; CHECK: while.body.preheader15:
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll
index dcfebe3..6e95b63 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pr48844-br-to-switch-vectorization.ll
@@ -46,7 +46,6 @@ define dso_local void @test(ptr %start, ptr %end) #0 {
; AVX2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 124
; AVX2-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[BB12_PREHEADER11:%.*]], label [[VECTOR_PH:%.*]]
; AVX2: vector.ph:
-; AVX2-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[TMP3]], 24
; AVX2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP3]], 9223372036854775776
; AVX2-NEXT: br label [[VECTOR_BODY:%.*]]
; AVX2: vector.body:
@@ -80,6 +79,7 @@ define dso_local void @test(ptr %start, ptr %end) #0 {
; AVX2-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; AVX2-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; AVX2: middle.block:
+; AVX2-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[TMP3]], 24
; AVX2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; AVX2-NEXT: br i1 [[CMP_N]], label [[EXIT]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; AVX2: vec.epilog.iter.check:
@@ -90,8 +90,6 @@ define dso_local void @test(ptr %start, ptr %end) #0 {
; AVX2: vec.epilog.ph:
; AVX2-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; AVX2-NEXT: [[N_VEC10:%.*]] = and i64 [[TMP3]], 9223372036854775800
-; AVX2-NEXT: [[TMP21:%.*]] = shl i64 [[N_VEC10]], 2
-; AVX2-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP21]]
; AVX2-NEXT: br label [[BB12:%.*]]
; AVX2: vec.epilog.vector.body:
; AVX2-NEXT: [[INDEX12:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[BB12_PREHEADER11]] ], [ [[INDEX_NEXT16:%.*]], [[BB12]] ]
@@ -106,6 +104,8 @@ define dso_local void @test(ptr %start, ptr %end) #0 {
; AVX2-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT16]], [[N_VEC10]]
; AVX2-NEXT: br i1 [[TMP25]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[BB12]], !llvm.loop [[LOOP4:![0-9]+]]
; AVX2: vec.epilog.middle.block:
+; AVX2-NEXT: [[TMP27:%.*]] = shl i64 [[N_VEC10]], 2
+; AVX2-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP27]]
; AVX2-NEXT: [[CMP_N17:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC10]]
; AVX2-NEXT: br i1 [[CMP_N17]], label [[EXIT]], label [[BB12_PREHEADER1]]
; AVX2: bb12.preheader:
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll b/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll
index bfb8554..4562072 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vdiv.ll
@@ -16,8 +16,8 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-SAME: ptr writeonly captures(none) [[X:%.*]], ptr readonly captures(none) [[Y:%.*]], double [[A:%.*]], i32 [[N:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[N]], 0
-; CHECK-NEXT: br i1 [[CMP1]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_END:.*]]
-; CHECK: [[FOR_BODY_PREHEADER]]:
+; CHECK-NEXT: br i1 [[CMP1]], label %[[ITER_CHECK:.*]], label %[[FOR_END:.*]]
+; CHECK: [[ITER_CHECK]]:
; CHECK-NEXT: [[X4:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT: [[Y5:%.*]] = ptrtoint ptr [[Y]] to i64
; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[N]] to i64
@@ -25,12 +25,11 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[X4]], [[Y5]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 128
; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[MIN_ITERS_CHECK]], i1 true, i1 [[DIFF_CHECK]]
-; CHECK-NEXT: br i1 [[OR_COND]], label %[[FOR_BODY_PREHEADER9:.*]], label %[[VECTOR_PH:.*]]
-; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br i1 [[OR_COND]], label %[[FOR_BODY_PREHEADER:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
+; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
; CHECK-NEXT: [[MIN_ITERS_CHECK6:%.*]] = icmp ult i32 [[N]], 16
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK6]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH1:.*]]
-; CHECK: [[VECTOR_PH1]]:
-; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 12
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK6]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 2147483632
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x double> poison, double [[A]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x double> [[BROADCAST_SPLATINSERT]], <4 x double> poison, <4 x i32> zeroinitializer
@@ -40,7 +39,7 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: [[TMP4:%.*]] = fdiv fast <4 x double> splat (double 1.000000e+00), [[BROADCAST_SPLAT]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 32
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 64
@@ -65,13 +64,14 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 12
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
; CHECK: [[VEC_EPILOG_ITER_CHECK]]:
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp eq i64 [[N_VEC_REMAINING]], 0
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[FOR_BODY_PREHEADER9]], label %[[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[FOR_BODY_PREHEADER]], label %[[VEC_EPILOG_PH]], !prof [[PROF10:![0-9]+]]
; CHECK: [[VEC_EPILOG_PH]]:
-; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_PH]] ]
+; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[N_VEC11:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 2147483644
; CHECK-NEXT: [[BROADCAST_SPLATINSERT14:%.*]] = insertelement <4 x double> poison, double [[A]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT15:%.*]] = shufflevector <4 x double> [[BROADCAST_SPLATINSERT14]], <4 x double> poison, <4 x i32> zeroinitializer
@@ -86,12 +86,12 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: store <4 x double> [[TMP40]], ptr [[TMP41]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[INDEX_NEXT16]] = add nuw i64 [[INDEX12]], 4
; CHECK-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT16]], [[N_VEC11]]
-; CHECK-NEXT: br i1 [[TMP42]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP42]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N17:%.*]] = icmp eq i64 [[N_VEC11]], [[WIDE_TRIP_COUNT]]
-; CHECK-NEXT: br i1 [[CMP_N17]], label %[[FOR_END]], label %[[FOR_BODY_PREHEADER9]]
-; CHECK: [[FOR_BODY_PREHEADER9]]:
-; CHECK-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ 0, %[[FOR_BODY_PREHEADER]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[N_VEC11]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
+; CHECK-NEXT: br i1 [[CMP_N17]], label %[[FOR_END]], label %[[FOR_BODY_PREHEADER]]
+; CHECK: [[FOR_BODY_PREHEADER]]:
+; CHECK-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ 0, %[[ITER_CHECK]] ], [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ [[N_VEC11]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ]
; CHECK-NEXT: [[TMP43:%.*]] = sub nsw i64 [[WIDE_TRIP_COUNT]], [[INDVARS_IV_PH]]
; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP43]], 7
; CHECK-NEXT: [[LCMP_MOD_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 0
@@ -110,13 +110,13 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: [[INDVARS_IV_NEXT_PROL]] = add nuw nsw i64 [[INDVARS_IV_PROL]], 1
; CHECK-NEXT: [[PROL_ITER_NEXT]] = add i64 [[PROL_ITER]], 1
; CHECK-NEXT: [[PROL_ITER_CMP_NOT:%.*]] = icmp eq i64 [[PROL_ITER_NEXT]], [[XTRAITER]]
-; CHECK-NEXT: br i1 [[PROL_ITER_CMP_NOT]], label %[[FOR_BODY_PROL_LOOPEXIT]], label %[[FOR_BODY_PROL]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[PROL_ITER_CMP_NOT]], label %[[FOR_BODY_PROL_LOOPEXIT]], label %[[FOR_BODY_PROL]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[FOR_BODY_PROL_LOOPEXIT]]:
-; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ [[INDVARS_IV_PH]], %[[FOR_BODY_PREHEADER9]] ], [ [[INDVARS_IV_NEXT_PROL]], %[[FOR_BODY_PROL]] ]
+; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ [[INDVARS_IV_PH]], %[[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT_PROL]], %[[FOR_BODY_PROL]] ]
; CHECK-NEXT: [[TMP20:%.*]] = sub nsw i64 [[INDVARS_IV_PH]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp ugt i64 [[TMP20]], -8
-; CHECK-NEXT: br i1 [[TMP21]], label %[[FOR_END]], label %[[FOR_BODY_PREHEADER9_NEW:.*]]
-; CHECK: [[FOR_BODY_PREHEADER9_NEW]]:
+; CHECK-NEXT: br i1 [[TMP21]], label %[[FOR_END]], label %[[FOR_BODY_PREHEADER_NEW:.*]]
+; CHECK: [[FOR_BODY_PREHEADER_NEW]]:
; CHECK-NEXT: [[TMP22:%.*]] = fdiv fast double 1.000000e+00, [[A]]
; CHECK-NEXT: [[TMP23:%.*]] = fdiv fast double 1.000000e+00, [[A]]
; CHECK-NEXT: [[TMP24:%.*]] = fdiv fast double 1.000000e+00, [[A]]
@@ -127,7 +127,7 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: [[TMP29:%.*]] = fdiv fast double 1.000000e+00, [[A]]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_UNR]], %[[FOR_BODY_PREHEADER9_NEW]] ], [ [[INDVARS_IV_NEXT_7:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_UNR]], %[[FOR_BODY_PREHEADER_NEW]] ], [ [[INDVARS_IV_NEXT_7:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[T0:%.*]] = load double, ptr [[ARRAYIDX]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[TMP30:%.*]] = fmul fast double [[T0]], [[TMP22]]
@@ -177,7 +177,7 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: store double [[TMP37]], ptr [[ARRAYIDX2_7]], align 8, !tbaa [[DOUBLE_TBAA3]]
; CHECK-NEXT: [[INDVARS_IV_NEXT_7]] = add nuw nsw i64 [[INDVARS_IV]], 8
; CHECK-NEXT: [[EXITCOND_NOT_7:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_7]], [[WIDE_TRIP_COUNT]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT_7]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT_7]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[FOR_END]]:
; CHECK-NEXT: ret void
;
@@ -232,8 +232,9 @@ attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="
; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META8:![0-9]+]], [[META9:![0-9]+]]}
; CHECK: [[META8]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META9]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META8]], [[META9]]}
-; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META12:![0-9]+]]}
-; CHECK: [[META12]] = !{!"llvm.loop.unroll.disable"}
-; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META8]]}
+; CHECK: [[PROF10]] = !{!"branch_weights", i32 4, i32 12}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META8]], [[META9]]}
+; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META13:![0-9]+]]}
+; CHECK: [[META13]] = !{!"llvm.loop.unroll.disable"}
+; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META8]]}
;.
diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-profile.ll b/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-profile.ll
new file mode 100644
index 0000000..9cc417f
--- /dev/null
+++ b/llvm/test/Transforms/SimpleLoopUnswitch/nontrivial-unswitch-profile.ll
@@ -0,0 +1,89 @@
+; RUN: split-file %s %t
+; RUN: cat %t/main.ll %t/probable-or.prof > %t/probable-or.ll
+; RUN: cat %t/main.ll %t/probable-and.prof > %t/probable-and.ll
+; RUN: opt -passes='loop(simple-loop-unswitch<nontrivial>)' -S %t/probable-or.ll -o -| FileCheck %t/probable-or.prof
+; RUN: opt -passes='loop(simple-loop-unswitch<nontrivial>)' -S %t/probable-and.ll -o -| FileCheck %t/probable-and.prof
+
+;--- main.ll
+declare i32 @a()
+declare i32 @b()
+
+define i32 @or(ptr %ptr, i1 %cond) !prof !0 {
+entry:
+ br label %loop_begin
+
+loop_begin:
+ %v1 = load i1, ptr %ptr
+ %cond_or = or i1 %v1, %cond
+ br i1 %cond_or, label %loop_a, label %loop_b, !prof !1
+
+loop_a:
+ call i32 @a()
+ br label %latch
+
+loop_b:
+ call i32 @b()
+ br label %latch
+
+latch:
+ %v2 = load i1, ptr %ptr
+ br i1 %v2, label %loop_begin, label %loop_exit, !prof !2
+
+loop_exit:
+ ret i32 0
+}
+
+define i32 @and(ptr %ptr, i1 %cond) !prof !0 {
+entry:
+ br label %loop_begin
+
+loop_begin:
+ %v1 = load i1, ptr %ptr
+ %cond_and = and i1 %v1, %cond
+ br i1 %cond_and, label %loop_a, label %loop_b, !prof !1
+
+loop_a:
+ call i32 @a()
+ br label %latch
+
+loop_b:
+ call i32 @b()
+ br label %latch
+
+latch:
+ %v2 = load i1, ptr %ptr
+ br i1 %v2, label %loop_begin, label %loop_exit, !prof !2
+
+loop_exit:
+ ret i32 0
+}
+
+;--- probable-or.prof
+!0 = !{!"function_entry_count", i32 10}
+!1 = !{!"branch_weights", i32 1, i32 1000}
+!2 = !{!"branch_weights", i32 5, i32 7}
+; CHECK-LABEL: @or
+; CHECK-LABEL: entry:
+; CHECK-NEXT: %cond.fr = freeze i1 %cond
+; CHECK-NEXT: br i1 %cond.fr, label %entry.split.us, label %entry.split, !prof !1
+; CHECK-LABEL: @and
+; CHECK-LABEL: entry:
+; CHECK-NEXT: %cond.fr = freeze i1 %cond
+; CHECK-NEXT: br i1 %cond.fr, label %entry.split, label %entry.split.us, !prof !3
+; CHECK: !1 = !{!"branch_weights", i32 1, i32 1000}
+; CHECK: !3 = !{!"unknown", !"simple-loop-unswitch"}
+
+;--- probable-and.prof
+!0 = !{!"function_entry_count", i32 10}
+!1 = !{!"branch_weights", i32 1000, i32 1}
+!2 = !{!"branch_weights", i32 5, i32 7}
+; CHECK-LABEL: @or
+; CHECK-LABEL: entry:
+; CHECK-NEXT: %cond.fr = freeze i1 %cond
+; CHECK-NEXT: br i1 %cond.fr, label %entry.split.us, label %entry.split, !prof !1
+; CHECK-LABEL: @and
+; CHECK-LABEL: entry:
+; CHECK-NEXT: %cond.fr = freeze i1 %cond
+; CHECK-NEXT: br i1 %cond.fr, label %entry.split, label %entry.split.us, !prof !3
+; CHECK: !1 = !{!"unknown", !"simple-loop-unswitch"}
+; CHECK: !3 = !{!"branch_weights", i32 1000, i32 1}
diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/pr60736.ll b/llvm/test/Transforms/SimpleLoopUnswitch/pr60736.ll
index 0964c55..3760be4 100644
--- a/llvm/test/Transforms/SimpleLoopUnswitch/pr60736.ll
+++ b/llvm/test/Transforms/SimpleLoopUnswitch/pr60736.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt < %s -simple-loop-unswitch-inject-invariant-conditions=true -passes='loop(simple-loop-unswitch<nontrivial>,loop-instsimplify)' -S | FileCheck %s
define void @test() {
@@ -7,7 +7,7 @@ define void @test() {
; CHECK-NEXT: [[TMP:%.*]] = call i1 @llvm.experimental.widenable.condition()
; CHECK-NEXT: [[TMP1:%.*]] = load atomic i32, ptr addrspace(1) poison unordered, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load atomic i32, ptr addrspace(1) poison unordered, align 8
-; CHECK-NEXT: br i1 [[TMP]], label [[BB_SPLIT:%.*]], label [[BB3_SPLIT_US:%.*]]
+; CHECK-NEXT: br i1 [[TMP]], label [[BB_SPLIT:%.*]], label [[BB3_SPLIT_US:%.*]], !prof [[PROF0:![0-9]+]]
; CHECK: bb.split:
; CHECK-NEXT: br label [[BB3:%.*]]
; CHECK: bb3:
@@ -19,7 +19,7 @@ define void @test() {
; CHECK-NEXT: [[TMP6_US:%.*]] = phi i32 [ poison, [[BB3_SPLIT_US]] ]
; CHECK-NEXT: [[TMP7_US:%.*]] = add nuw nsw i32 [[TMP6_US]], 2
; CHECK-NEXT: [[TMP8_US:%.*]] = icmp ult i32 [[TMP7_US]], [[TMP2]]
-; CHECK-NEXT: br i1 [[TMP8_US]], label [[BB9_US:%.*]], label [[BB16_SPLIT_US:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8_US]], label [[BB9_US:%.*]], label [[BB16_SPLIT_US:%.*]], !prof [[PROF0]]
; CHECK: bb9.us:
; CHECK-NEXT: br label [[BB17_SPLIT_US:%.*]]
; CHECK: bb16.split.us:
@@ -96,3 +96,8 @@ declare i1 @llvm.experimental.widenable.condition()
!0 = !{!"branch_weights", i32 1048576, i32 1}
+;.
+; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(inaccessiblemem: readwrite) }
+;.
+; CHECK: [[PROF0]] = !{!"branch_weights", i32 1048576, i32 1}
+;.
diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/simple-unswitch-profile.ll b/llvm/test/Transforms/SimpleLoopUnswitch/simple-unswitch-profile.ll
new file mode 100644
index 0000000..ec6baa5
--- /dev/null
+++ b/llvm/test/Transforms/SimpleLoopUnswitch/simple-unswitch-profile.ll
@@ -0,0 +1,157 @@
+; RUN: split-file %s %t
+; RUN: cat %t/main.ll %t/probable-or.prof > %t/probable-or.ll
+; RUN: cat %t/main.ll %t/probable-and.prof > %t/probable-and.ll
+; RUN: opt -passes='loop-mssa(simple-loop-unswitch)' -S %t/probable-or.ll -o - | FileCheck %t/probable-or.prof
+; RUN: opt -passes='loop-mssa(simple-loop-unswitch)' -S %t/probable-and.ll -o - | FileCheck %t/probable-and.prof
+;
+; RUN: opt -passes='module(print<block-freq>),function(loop-mssa(simple-loop-unswitch)),module(print<block-freq>)' \
+; RUN: %t/probable-or.ll -disable-output -simple-loop-unswitch-estimate-profile=0 2>&1 | FileCheck %t/probable-or.prof --check-prefixes=PROFILE-COM,PROFILE-REF
+
+; RUN: opt -passes='module(print<block-freq>),function(loop-mssa(simple-loop-unswitch)),module(print<block-freq>)' \
+; RUN: %t/probable-or.ll -disable-output -simple-loop-unswitch-estimate-profile=1 2>&1 | FileCheck %t/probable-or.prof --check-prefixes=PROFILE-COM,PROFILE-CHK
+
+; RUN: opt -passes='module(print<block-freq>),function(loop-mssa(simple-loop-unswitch)),module(print<block-freq>)' \
+; RUN: %t/probable-and.ll -disable-output -simple-loop-unswitch-estimate-profile=0 2>&1 | FileCheck %t/probable-and.prof --check-prefixes=PROFILE-COM,PROFILE-REF
+
+; RUN: opt -passes='module(print<block-freq>),function(loop-mssa(simple-loop-unswitch)),module(print<block-freq>)' \
+; RUN: %t/probable-and.ll -disable-output -simple-loop-unswitch-estimate-profile=1 2>&1 | FileCheck %t/probable-and.prof --check-prefixes=PROFILE-COM,PROFILE-CHK
+
+;--- main.ll
+declare void @some_func() noreturn
+
+define i32 @or(i1 %cond1, i32 %var1) !prof !0 {
+entry:
+ br label %loop_begin
+
+loop_begin:
+ %var3 = phi i32 [%var1, %entry], [%var2, %do_something]
+ %cond2 = icmp eq i32 %var3, 10
+ %cond.or = or i1 %cond1, %cond2
+ br i1 %cond.or, label %loop_exit, label %do_something, !prof !1
+
+do_something:
+ %var2 = add i32 %var3, 1
+ call void @some_func() noreturn nounwind
+ br label %loop_begin
+
+loop_exit:
+ ret i32 0
+}
+
+define i32 @and(i1 %cond1, i32 %var1) !prof !0 {
+entry:
+ br label %loop_begin
+
+loop_begin:
+ %var3 = phi i32 [%var1, %entry], [%var2, %do_something]
+ %cond2 = icmp eq i32 %var3, 10
+ %cond.and = and i1 %cond1, %cond2
+ br i1 %cond.and, label %do_something, label %loop_exit, !prof !1
+
+do_something:
+ %var2 = add i32 %var3, 1
+ call void @some_func() noreturn nounwind
+ br label %loop_begin
+
+loop_exit:
+ ret i32 0
+}
+
+;--- probable-or.prof
+!0 = !{!"function_entry_count", i32 10}
+!1 = !{!"branch_weights", i32 1, i32 1000}
+; CHECK-LABEL: @or
+; CHECK-LABEL: entry:
+; CHECK-NEXT: %cond1.fr = freeze i1 %cond1
+; CHECK-NEXT: br i1 %cond1.fr, label %loop_exit.split, label %entry.split, !prof !1
+; CHECK-LABEL: @and
+; CHECK-LABEL: entry:
+; CHECK-NEXT: %cond1.fr = freeze i1 %cond1
+; CHECK-NEXT: br i1 %cond1.fr, label %entry.split, label %loop_exit.split, !prof !2
+; CHECK: !1 = !{!"branch_weights", i32 1, i32 1000}
+; CHECK: !2 = !{!"unknown", !"simple-loop-unswitch"}
+
+; PROFILE-COM: Printing analysis results of BFI for function 'or':
+; PROFILE-COM: block-frequency-info: or
+ ; PROFILE-COM: - entry: {{.*}} count = 10
+ ; PROFILE-COM: - loop_begin: {{.*}} count = 10010
+ ; PROFILE-COM: - do_something: {{.*}} count = 10000
+ ; PROFILE-COM: - loop_exit: {{.*}} count = 10
+
+; PROFILE-COM: Printing analysis results of BFI for function 'and':
+; PROFILE-COM: block-frequency-info: and
+ ; PROFILE-COM: - entry: {{.*}} count = 10
+ ; PROFILE-COM: - loop_begin: {{.*}} count = 10
+ ; PROFILE-COM: - do_something: {{.*}} count = 0
+ ; PROFILE-COM: - loop_exit: {{.*}} count = 10
+
+; PROFILE-COM: Printing analysis results of BFI for function 'or':
+; PROFILE-COM: block-frequency-info: or
+ ; PROFILE-COM: - entry: {{.*}} count = 10
+ ; PROFILE-REF: - entry.split: {{.*}} count = 5
+ ; PROFILE-CHK: - entry.split: {{.*}} count = 10
+ ; PROFILE-REF: - loop_begin: {{.*}} count = 5005
+ ; PROFILE-CHK: - loop_begin: {{.*}} count = 10000
+ ; PROFILE-REF: - do_something: {{.*}} count = 5000
+ ; PROFILE-CHK: - do_something: {{.*}} count = 9990
+ ; PROFILE-REF: - loop_exit: {{.*}} count = 5
+ ; PROFILE-CHK: - loop_exit: {{.*}} count = 10
+ ; PROFILE-COM: - loop_exit.split: {{.*}} count = 10
+
+; PROFILE-COM: Printing analysis results of BFI for function 'and':
+; PROFILE-COM: block-frequency-info: and
+ ; PROFILE-COM: - entry: {{.*}} count = 10
+ ; PROFILE-COM: - entry.split: {{.*}} count = 5
+ ; PROFILE-COM: - loop_begin: {{.*}} count = 5
+ ; PROFILE-COM: - do_something: {{.*}} count = 0
+ ; PROFILE-COM: - loop_exit: {{.*}} count = 5
+ ; PROFILE-COM: - loop_exit.split: {{.*}} count = 10
+
+;--- probable-and.prof
+!0 = !{!"function_entry_count", i32 10}
+!1 = !{!"branch_weights", i32 1000, i32 1}
+; CHECK-LABEL: @or
+; CHECK-LABEL: entry:
+; CHECK-NEXT: %cond1.fr = freeze i1 %cond1
+; CHECK-NEXT: br i1 %cond1.fr, label %loop_exit.split, label %entry.split, !prof !1
+; CHECK-LABEL: @and
+; CHECK-LABEL: entry:
+; CHECK-NEXT: %cond1.fr = freeze i1 %cond1
+; CHECK-NEXT: br i1 %cond1.fr, label %entry.split, label %loop_exit.split, !prof !2
+; CHECK: !1 = !{!"unknown", !"simple-loop-unswitch"}
+; CHECK: !2 = !{!"branch_weights", i32 1000, i32 1}
+; PROFILE-COM: Printing analysis results of BFI for function 'or':
+; PROFILE-COM: block-frequency-info: or
+ ; PROFILE-COM: - entry: {{.*}}, count = 10
+ ; PROFILE-COM: - loop_begin: {{.*}}, count = 10
+ ; PROFILE-COM: - do_something: {{.*}}, count = 0
+ ; PROFILE-COM: - loop_exit: {{.*}}, count = 10
+
+; PROFILE-COM: Printing analysis results of BFI for function 'and':
+; PROFILE-COM: block-frequency-info: and
+ ; PROFILE-COM: - entry: {{.*}} count = 10
+ ; PROFILE-COM: - loop_begin: {{.*}} count = 10010
+ ; PROFILE-COM: - do_something: {{.*}} count = 10000
+ ; PROFILE-COM: - loop_exit: {{.*}} count = 10
+
+; PROFILE-COM: Printing analysis results of BFI for function 'or':
+; PROFILE-COM: block-frequency-info: or
+ ; PROFILE-COM: - entry: {{.*}} count = 10
+ ; PROFILE-COM: - entry.split: {{.*}} count = 5
+ ; PROFILE-COM: - loop_begin: {{.*}} count = 5
+ ; PROFILE-COM: - do_something: {{.*}} count = 0
+ ; PROFILE-COM: - loop_exit: {{.*}} count = 5
+ ; PROFILE-COM: - loop_exit.split: {{.*}} count = 10
+
+; PROFILE-COM: Printing analysis results of BFI for function 'and':
+; PROFILE-COM: block-frequency-info: and
+ ; PROFILE-COM: - entry: {{.*}} count = 10
+ ; PROFILE-REF: - entry.split: {{.*}} count = 5
+ ; PROFILE-CHK: - entry.split: {{.*}} count = 10
+ ; PROFILE-REF: - loop_begin: {{.*}} count = 5005
+ ; PROFILE-CHK: - loop_begin: {{.*}} count = 10000
+ ; PROFILE-REF: - do_something: {{.*}} count = 5000
+ ; PROFILE-CHK: - do_something: {{.*}} count = 9990
+ ; PROFILE-REF: - loop_exit: {{.*}} count = 5
+ ; PROFILE-CHK: - loop_exit: {{.*}} count = 10
+ ; PROFILE-COM: - loop_exit.split: {{.*}} count = 10
diff --git a/llvm/test/Transforms/UnifyLoopExits/basic.ll b/llvm/test/Transforms/UnifyLoopExits/basic.ll
index ccd15d4..d04d142 100644
--- a/llvm/test/Transforms/UnifyLoopExits/basic.ll
+++ b/llvm/test/Transforms/UnifyLoopExits/basic.ll
@@ -18,12 +18,12 @@ define void @loop_1(i1 %PredEntry, i1 %PredB, i1 %PredC, i1 %PredD) {
; CHECK: F:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: G:
-; CHECK-NEXT: br label [[F:%.*]]
+; CHECK-NEXT: br label [[Y:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
; CHECK: loop.exit.guard:
-; CHECK-NEXT: [[GUARD_E:%.*]] = phi i1 [ true, [[B]] ], [ false, [[C]] ], [ false, [[D]] ]
-; CHECK-NEXT: br i1 [[GUARD_E]], label [[E:%.*]], label [[F]]
+; CHECK-NEXT: [[GUARD_X:%.*]] = phi i1 [ true, [[B]] ], [ false, [[C]] ], [ false, [[D]] ]
+; CHECK-NEXT: br i1 [[GUARD_X]], label [[X:%.*]], label [[Y]]
;
entry:
br i1 %PredEntry, label %A, label %G
@@ -53,6 +53,67 @@ exit:
ret void
}
+define void @loop_1_callbr(i1 %PredEntry, i1 %PredB, i1 %PredC, i1 %PredD) {
+; CHECK-LABEL: @loop_1_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[G:%.*]]
+; CHECK: A:
+; CHECK-NEXT: br label [[B:%.*]]
+; CHECK: B:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB:%.*]])
+; CHECK-NEXT: to label [[C:%.*]] [label %B.target.E]
+; CHECK: C:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDC:%.*]])
+; CHECK-NEXT: to label [[D:%.*]] [label %C.target.F]
+; CHECK: D:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDD:%.*]])
+; CHECK-NEXT: to label [[A]] [label %D.target.F]
+; CHECK: E:
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: F:
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: G:
+; CHECK-NEXT: br label [[Y:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+; CHECK: B.target.E:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]]
+; CHECK: C.target.F:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: D.target.F:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: loop.exit.guard:
+; CHECK-NEXT: [[GUARD_X:%.*]] = phi i1 [ true, [[B_TARGET_E:%.*]] ], [ false, [[C_TARGET_F:%.*]] ], [ false, [[D_TARGET_F:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_X]], label [[X:%.*]], label [[Y]]
+;
+entry:
+ br i1 %PredEntry, label %A, label %G
+
+A:
+ br label %B
+
+B:
+ callbr void asm "", "r,!i"(i1 %PredB) to label %C [label %E]
+
+C:
+ callbr void asm "", "r,!i"(i1 %PredC) to label %D [label %F]
+
+D:
+ callbr void asm "", "r,!i"(i1 %PredD) to label %A [label %F]
+
+E:
+ br label %exit
+
+F:
+ br label %exit
+
+G:
+ br label %F
+
+exit:
+ ret void
+}
+
define void @loop_2(i1 %PredA, i1 %PredB, i1 %PredC) {
; CHECK-LABEL: @loop_2(
; CHECK-NEXT: entry:
@@ -107,3 +168,67 @@ Z:
exit:
ret void
}
+
+define void @loop_2_callbr(i1 %PredA, i1 %PredB, i1 %PredC) {
+; CHECK-LABEL: @loop_2_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[A:%.*]]
+; CHECK: A:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]])
+; CHECK-NEXT: to label [[B:%.*]] [label %A.target.X]
+; CHECK: B:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB:%.*]])
+; CHECK-NEXT: to label [[C:%.*]] [label %B.target.Y]
+; CHECK: C:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDC:%.*]])
+; CHECK-NEXT: to label [[D:%.*]] [label %C.target.Z]
+; CHECK: D:
+; CHECK-NEXT: br label [[A]]
+; CHECK: X:
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: Y:
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: Z:
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+; CHECK: A.target.X:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]]
+; CHECK: B.target.Y:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: C.target.Z:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: loop.exit.guard:
+; CHECK-NEXT: [[GUARD_X:%.*]] = phi i1 [ true, [[A_TARGET_X:%.*]] ], [ false, [[B_TARGET_Y:%.*]] ], [ false, [[C_TARGET_Z:%.*]] ]
+; CHECK-NEXT: [[GUARD_Y:%.*]] = phi i1 [ false, [[A_TARGET_X]] ], [ true, [[B_TARGET_Y]] ], [ false, [[C_TARGET_Z]] ]
+; CHECK-NEXT: br i1 [[GUARD_X]], label [[X:%.*]], label [[LOOP_EXIT_GUARD1:%.*]]
+; CHECK: loop.exit.guard1:
+; CHECK-NEXT: br i1 [[GUARD_Y]], label [[Y:%.*]], label [[Z:%.*]]
+;
+entry:
+ br label %A
+
+A:
+ callbr void asm "", "r,!i"(i1 %PredA) to label %B [label %X]
+
+B:
+ callbr void asm "", "r,!i"(i1 %PredB) to label %C [label %Y]
+
+C:
+ callbr void asm "", "r,!i"(i1 %PredC) to label %D [label %Z]
+
+D:
+ br label %A
+
+X:
+ br label %exit
+
+Y:
+ br label %exit
+
+Z:
+ br label %exit
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/UnifyLoopExits/integer_guards.ll b/llvm/test/Transforms/UnifyLoopExits/integer_guards.ll
index f55639f..be982d5 100644
--- a/llvm/test/Transforms/UnifyLoopExits/integer_guards.ll
+++ b/llvm/test/Transforms/UnifyLoopExits/integer_guards.ll
@@ -71,6 +71,85 @@ E:
ret void
}
+define void @loop_two_exits_callbr(i1 %PredEntry, i1 %PredA) {
+; CHECK-LABEL: @loop_two_exits_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[E:%.*]]
+; CHECK: A:
+; CHECK-NEXT: [[INC1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC2:%.*]], [[C:%.*]] ]
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]])
+; CHECK-NEXT: to label [[A_TARGET_B:%.*]] [label %C]
+; CHECK: B:
+; CHECK-NEXT: tail call fastcc void @check(i32 1) #[[ATTR0]]
+; CHECK-NEXT: br label [[D:%.*]]
+; CHECK: C:
+; CHECK-NEXT: [[INC2]] = add i32 [[INC1]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC2]], 10
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]])
+; CHECK-NEXT: to label [[A]] [label %C.target.E]
+; CHECK: D:
+; CHECK-NEXT: unreachable
+; CHECK: E:
+; CHECK-NEXT: ret void
+; CHECK: A.target.B:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]]
+; CHECK: C.target.E:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: loop.exit.guard:
+; CHECK-NEXT: [[MERGED_BB_IDX:%.*]] = phi i32 [ 0, [[A_TARGET_B]] ], [ 1, [[C_TARGET_E:%.*]] ]
+; CHECK-NEXT: [[B_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 0
+; CHECK-NEXT: br i1 [[B_PREDICATE]], label [[B:%.*]], label [[E]]
+;
+; BOOLEAN-LABEL: @loop_two_exits_callbr(
+; BOOLEAN-NEXT: entry:
+; BOOLEAN-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[E:%.*]]
+; BOOLEAN: A:
+; BOOLEAN-NEXT: [[INC1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC2:%.*]], [[C:%.*]] ]
+; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]])
+; BOOLEAN-NEXT: to label [[A_TARGET_B:%.*]] [label %C]
+; BOOLEAN: B:
+; BOOLEAN-NEXT: tail call fastcc void @check(i32 1) #[[ATTR0]]
+; BOOLEAN-NEXT: br label [[D:%.*]]
+; BOOLEAN: C:
+; BOOLEAN-NEXT: [[INC2]] = add i32 [[INC1]], 1
+; BOOLEAN-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC2]], 10
+; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]])
+; BOOLEAN-NEXT: to label [[A]] [label %C.target.E]
+; BOOLEAN: D:
+; BOOLEAN-NEXT: unreachable
+; BOOLEAN: E:
+; BOOLEAN-NEXT: ret void
+; BOOLEAN: A.target.B:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD:%.*]]
+; BOOLEAN: C.target.E:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]]
+; BOOLEAN: loop.exit.guard:
+; BOOLEAN-NEXT: [[GUARD_B:%.*]] = phi i1 [ true, [[A_TARGET_B]] ], [ false, [[C_TARGET_E:%.*]] ]
+; BOOLEAN-NEXT: br i1 [[GUARD_B]], label [[B:%.*]], label [[E]]
+;
+entry:
+ br i1 %PredEntry, label %A, label %E
+
+A:
+ %inc1 = phi i32 [ 0, %entry ], [ %inc2, %C ]
+ callbr void asm "", "r,!i"(i1 %PredA) to label %B [label %C]
+
+B:
+ tail call fastcc void @check(i32 1) #0
+ br label %D
+
+C:
+ %inc2 = add i32 %inc1, 1
+ %cmp = icmp ult i32 %inc2, 10
+  callbr void asm "", "r,!i"(i1 %cmp) to label %A [label %E]
+
+D:
+ unreachable
+
+E:
+ ret void
+}
+
; The loop exit blocks appear in an inner loop.
define void @inner_loop(i1 %PredEntry, i1 %PredA, i1 %PredB) {
@@ -196,6 +275,164 @@ I:
ret void
}
+define void @inner_loop_callbr(i1 %PredEntry, i1 %PredA, i1 %PredB) {
+; CHECK-LABEL: @inner_loop_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[I:%.*]]
+; CHECK: A:
+; CHECK-NEXT: [[OUTER1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OUTER2:%.*]], [[G:%.*]] ]
+; CHECK-NEXT: br label [[B:%.*]]
+; CHECK: B:
+; CHECK-NEXT: [[INNER1:%.*]] = phi i32 [ 0, [[A]] ], [ [[INNER2:%.*]], [[F:%.*]] ]
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]])
+; CHECK-NEXT: to label [[D:%.*]] [label %B.target.B.target.C]
+; CHECK: C:
+; CHECK-NEXT: tail call fastcc void @check(i32 1) #[[ATTR0]]
+; CHECK-NEXT: br label [[H:%.*]]
+; CHECK: D:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB:%.*]])
+; CHECK-NEXT: to label [[D_TARGET_D_TARGET_E:%.*]] [label %F]
+; CHECK: E:
+; CHECK-NEXT: tail call fastcc void @check(i32 2) #[[ATTR0]]
+; CHECK-NEXT: br label [[H]]
+; CHECK: F:
+; CHECK-NEXT: [[INNER2]] = add i32 [[INNER1]], 1
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i32 [[INNER2]], 20
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP1]])
+; CHECK-NEXT: to label [[B]] [label %F.target.G]
+; CHECK: G:
+; CHECK-NEXT: [[OUTER2]] = add i32 [[OUTER1]], 1
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[OUTER2]], 10
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP2]])
+; CHECK-NEXT: to label [[A]] [label %G.target.I]
+; CHECK: H:
+; CHECK-NEXT: unreachable
+; CHECK: I:
+; CHECK-NEXT: ret void
+; CHECK: B.target.C:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]]
+; CHECK: D.target.E:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: G.target.I:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: loop.exit.guard:
+; CHECK-NEXT: [[MERGED_BB_IDX:%.*]] = phi i32 [ 0, [[B_TARGET_C:%.*]] ], [ 1, [[D_TARGET_E:%.*]] ], [ 2, [[G_TARGET_I:%.*]] ]
+; CHECK-NEXT: [[C_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 0
+; CHECK-NEXT: br i1 [[C_PREDICATE]], label [[C:%.*]], label [[LOOP_EXIT_GUARD1:%.*]]
+; CHECK: loop.exit.guard1:
+; CHECK-NEXT: [[E_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 1
+; CHECK-NEXT: br i1 [[E_PREDICATE]], label [[E:%.*]], label [[I]]
+; CHECK: B.target.B.target.C:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD2:%.*]]
+; CHECK: D.target.D.target.E:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD2]]
+; CHECK: F.target.G:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD2]]
+; CHECK: loop.exit.guard2:
+; CHECK-NEXT: [[MERGED_BB_IDX4:%.*]] = phi i32 [ 0, [[B_TARGET_B_TARGET_C:%.*]] ], [ 1, [[D_TARGET_D_TARGET_E]] ], [ 2, [[F_TARGET_G:%.*]] ]
+; CHECK-NEXT: [[B_TARGET_C_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX4]], 0
+; CHECK-NEXT: br i1 [[B_TARGET_C_PREDICATE]], label [[B_TARGET_C]], label [[LOOP_EXIT_GUARD3:%.*]]
+; CHECK: loop.exit.guard3:
+; CHECK-NEXT: [[D_TARGET_E_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX4]], 1
+; CHECK-NEXT: br i1 [[D_TARGET_E_PREDICATE]], label [[D_TARGET_E]], label [[G]]
+;
+; BOOLEAN-LABEL: @inner_loop_callbr(
+; BOOLEAN-NEXT: entry:
+; BOOLEAN-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[I:%.*]]
+; BOOLEAN: A:
+; BOOLEAN-NEXT: [[OUTER1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[OUTER2:%.*]], [[G:%.*]] ]
+; BOOLEAN-NEXT: br label [[B:%.*]]
+; BOOLEAN: B:
+; BOOLEAN-NEXT: [[INNER1:%.*]] = phi i32 [ 0, [[A]] ], [ [[INNER2:%.*]], [[F:%.*]] ]
+; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]])
+; BOOLEAN-NEXT: to label [[D:%.*]] [label %B.target.B.target.C]
+; BOOLEAN: C:
+; BOOLEAN-NEXT: tail call fastcc void @check(i32 1) #[[ATTR0]]
+; BOOLEAN-NEXT: br label [[H:%.*]]
+; BOOLEAN: D:
+; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB:%.*]])
+; BOOLEAN-NEXT: to label [[D_TARGET_D_TARGET_E:%.*]] [label %F]
+; BOOLEAN: E:
+; BOOLEAN-NEXT: tail call fastcc void @check(i32 2) #[[ATTR0]]
+; BOOLEAN-NEXT: br label [[H]]
+; BOOLEAN: F:
+; BOOLEAN-NEXT: [[INNER2]] = add i32 [[INNER1]], 1
+; BOOLEAN-NEXT: [[CMP1:%.*]] = icmp ult i32 [[INNER2]], 20
+; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[CMP1]])
+; BOOLEAN-NEXT: to label [[B]] [label %F.target.G]
+; BOOLEAN: G:
+; BOOLEAN-NEXT: [[OUTER2]] = add i32 [[OUTER1]], 1
+; BOOLEAN-NEXT: [[CMP2:%.*]] = icmp ult i32 [[OUTER2]], 10
+; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[CMP2]])
+; BOOLEAN-NEXT: to label [[A]] [label %G.target.I]
+; BOOLEAN: H:
+; BOOLEAN-NEXT: unreachable
+; BOOLEAN: I:
+; BOOLEAN-NEXT: ret void
+; BOOLEAN: B.target.C:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD:%.*]]
+; BOOLEAN: D.target.E:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]]
+; BOOLEAN: G.target.I:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]]
+; BOOLEAN: loop.exit.guard:
+; BOOLEAN-NEXT: [[GUARD_C:%.*]] = phi i1 [ true, [[B_TARGET_C:%.*]] ], [ false, [[D_TARGET_E:%.*]] ], [ false, [[G_TARGET_I:%.*]] ]
+; BOOLEAN-NEXT: [[GUARD_E:%.*]] = phi i1 [ false, [[B_TARGET_C]] ], [ true, [[D_TARGET_E]] ], [ false, [[G_TARGET_I]] ]
+; BOOLEAN-NEXT: br i1 [[GUARD_C]], label [[C:%.*]], label [[LOOP_EXIT_GUARD1:%.*]]
+; BOOLEAN: loop.exit.guard1:
+; BOOLEAN-NEXT: br i1 [[GUARD_E]], label [[E:%.*]], label [[I]]
+; BOOLEAN: B.target.B.target.C:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD2:%.*]]
+; BOOLEAN: D.target.D.target.E:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD2]]
+; BOOLEAN: F.target.G:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD2]]
+; BOOLEAN: loop.exit.guard2:
+; BOOLEAN-NEXT: [[GUARD_B_TARGET_C:%.*]] = phi i1 [ true, [[B_TARGET_B_TARGET_C:%.*]] ], [ false, [[D_TARGET_D_TARGET_E]] ], [ false, [[F_TARGET_G:%.*]] ]
+; BOOLEAN-NEXT: [[GUARD_D_TARGET_E:%.*]] = phi i1 [ false, [[B_TARGET_B_TARGET_C]] ], [ true, [[D_TARGET_D_TARGET_E]] ], [ false, [[F_TARGET_G]] ]
+; BOOLEAN-NEXT: br i1 [[GUARD_B_TARGET_C]], label [[B_TARGET_C]], label [[LOOP_EXIT_GUARD3:%.*]]
+; BOOLEAN: loop.exit.guard3:
+; BOOLEAN-NEXT: br i1 [[GUARD_D_TARGET_E]], label [[D_TARGET_E]], label [[G]]
+;
+entry:
+ br i1 %PredEntry, label %A, label %I
+
+A:
+ %outer1 = phi i32 [ 0, %entry ], [ %outer2, %G ]
+ br label %B
+
+B:
+ %inner1 = phi i32 [ 0, %A ], [ %inner2, %F ]
+ callbr void asm "", "r,!i"(i1 %PredA) to label %D [label %C]
+
+C:
+ tail call fastcc void @check(i32 1) #0
+ br label %H
+
+D:
+ callbr void asm "", "r,!i"(i1 %PredB) to label %E [label %F]
+
+E:
+ tail call fastcc void @check(i32 2) #0
+ br label %H
+
+F:
+ %inner2 = add i32 %inner1, 1
+ %cmp1 = icmp ult i32 %inner2, 20
+ callbr void asm "", "r,!i"(i1 %cmp1) to label %B [label %G]
+
+G:
+ %outer2 = add i32 %outer1, 1
+ %cmp2 = icmp ult i32 %outer2, 10
+ callbr void asm "", "r,!i"(i1 %cmp2) to label %A [label %I]
+
+H:
+ unreachable
+
+I:
+ ret void
+}
+
; A loop with more exit blocks.
define void @loop_five_exits(i1 %PredEntry, i1 %PredA, i1 %PredB, i1 %PredC, i1 %PredD) {
@@ -341,6 +578,179 @@ L:
ret void
}
+define void @loop_five_exits_callbr(i1 %PredEntry, i1 %PredA, i1 %PredB, i1 %PredC, i1 %PredD) {
+; CHECK-LABEL: @loop_five_exits_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[L:%.*]]
+; CHECK: A:
+; CHECK-NEXT: [[INC1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC2:%.*]], [[I:%.*]] ]
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]])
+; CHECK-NEXT: to label [[A_TARGET_B:%.*]] [label %C]
+; CHECK: B:
+; CHECK-NEXT: tail call fastcc void @check(i32 1) #[[ATTR0]]
+; CHECK-NEXT: br label [[J:%.*]]
+; CHECK: C:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB:%.*]])
+; CHECK-NEXT: to label [[C_TARGET_D:%.*]] [label %E]
+; CHECK: D:
+; CHECK-NEXT: tail call fastcc void @check(i32 2) #[[ATTR0]]
+; CHECK-NEXT: br label [[J]]
+; CHECK: E:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDC:%.*]])
+; CHECK-NEXT: to label [[E_TARGET_F:%.*]] [label %G]
+; CHECK: F:
+; CHECK-NEXT: tail call fastcc void @check(i32 3) #[[ATTR0]]
+; CHECK-NEXT: br label [[K:%.*]]
+; CHECK: G:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDD:%.*]])
+; CHECK-NEXT: to label [[G_TARGET_H:%.*]] [label %I]
+; CHECK: H:
+; CHECK-NEXT: tail call fastcc void @check(i32 4) #[[ATTR0]]
+; CHECK-NEXT: br label [[K]]
+; CHECK: I:
+; CHECK-NEXT: [[INC2]] = add i32 [[INC1]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC2]], 10
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]])
+; CHECK-NEXT: to label [[A]] [label %I.target.L]
+; CHECK: J:
+; CHECK-NEXT: br label [[L]]
+; CHECK: K:
+; CHECK-NEXT: br label [[L]]
+; CHECK: L:
+; CHECK-NEXT: ret void
+; CHECK: A.target.B:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]]
+; CHECK: C.target.D:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: E.target.F:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: G.target.H:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: I.target.L:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: loop.exit.guard:
+; CHECK-NEXT: [[MERGED_BB_IDX:%.*]] = phi i32 [ 0, [[A_TARGET_B]] ], [ 1, [[C_TARGET_D]] ], [ 2, [[E_TARGET_F]] ], [ 3, [[G_TARGET_H]] ], [ 4, [[I_TARGET_L:%.*]] ]
+; CHECK-NEXT: [[B_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 0
+; CHECK-NEXT: br i1 [[B_PREDICATE]], label [[B:%.*]], label [[LOOP_EXIT_GUARD1:%.*]]
+; CHECK: loop.exit.guard1:
+; CHECK-NEXT: [[D_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 1
+; CHECK-NEXT: br i1 [[D_PREDICATE]], label [[D:%.*]], label [[LOOP_EXIT_GUARD2:%.*]]
+; CHECK: loop.exit.guard2:
+; CHECK-NEXT: [[F_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 2
+; CHECK-NEXT: br i1 [[F_PREDICATE]], label [[F:%.*]], label [[LOOP_EXIT_GUARD3:%.*]]
+; CHECK: loop.exit.guard3:
+; CHECK-NEXT: [[H_PREDICATE:%.*]] = icmp eq i32 [[MERGED_BB_IDX]], 3
+; CHECK-NEXT: br i1 [[H_PREDICATE]], label [[H:%.*]], label [[L]]
+;
+; BOOLEAN-LABEL: @loop_five_exits_callbr(
+; BOOLEAN-NEXT: entry:
+; BOOLEAN-NEXT: br i1 [[PREDENTRY:%.*]], label [[A:%.*]], label [[L:%.*]]
+; BOOLEAN: A:
+; BOOLEAN-NEXT: [[INC1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC2:%.*]], [[I:%.*]] ]
+; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA:%.*]])
+; BOOLEAN-NEXT: to label [[A_TARGET_B:%.*]] [label %C]
+; BOOLEAN: B:
+; BOOLEAN-NEXT: tail call fastcc void @check(i32 1) #[[ATTR0]]
+; BOOLEAN-NEXT: br label [[J:%.*]]
+; BOOLEAN: C:
+; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB:%.*]])
+; BOOLEAN-NEXT: to label [[C_TARGET_D:%.*]] [label %E]
+; BOOLEAN: D:
+; BOOLEAN-NEXT: tail call fastcc void @check(i32 2) #[[ATTR0]]
+; BOOLEAN-NEXT: br label [[J]]
+; BOOLEAN: E:
+; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDC:%.*]])
+; BOOLEAN-NEXT: to label [[E_TARGET_F:%.*]] [label %G]
+; BOOLEAN: F:
+; BOOLEAN-NEXT: tail call fastcc void @check(i32 3) #[[ATTR0]]
+; BOOLEAN-NEXT: br label [[K:%.*]]
+; BOOLEAN: G:
+; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[PREDD:%.*]])
+; BOOLEAN-NEXT: to label [[G_TARGET_H:%.*]] [label %I]
+; BOOLEAN: H:
+; BOOLEAN-NEXT: tail call fastcc void @check(i32 4) #[[ATTR0]]
+; BOOLEAN-NEXT: br label [[K]]
+; BOOLEAN: I:
+; BOOLEAN-NEXT: [[INC2]] = add i32 [[INC1]], 1
+; BOOLEAN-NEXT: [[CMP:%.*]] = icmp ult i32 [[INC2]], 10
+; BOOLEAN-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]])
+; BOOLEAN-NEXT: to label [[A]] [label %I.target.L]
+; BOOLEAN: J:
+; BOOLEAN-NEXT: br label [[L]]
+; BOOLEAN: K:
+; BOOLEAN-NEXT: br label [[L]]
+; BOOLEAN: L:
+; BOOLEAN-NEXT: ret void
+; BOOLEAN: A.target.B:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD:%.*]]
+; BOOLEAN: C.target.D:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]]
+; BOOLEAN: E.target.F:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]]
+; BOOLEAN: G.target.H:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]]
+; BOOLEAN: I.target.L:
+; BOOLEAN-NEXT: br label [[LOOP_EXIT_GUARD]]
+; BOOLEAN: loop.exit.guard:
+; BOOLEAN-NEXT: [[GUARD_B:%.*]] = phi i1 [ true, [[A_TARGET_B]] ], [ false, [[C_TARGET_D]] ], [ false, [[E_TARGET_F]] ], [ false, [[G_TARGET_H]] ], [ false, [[I_TARGET_L:%.*]] ]
+; BOOLEAN-NEXT: [[GUARD_D:%.*]] = phi i1 [ false, [[A_TARGET_B]] ], [ true, [[C_TARGET_D]] ], [ false, [[E_TARGET_F]] ], [ false, [[G_TARGET_H]] ], [ false, [[I_TARGET_L]] ]
+; BOOLEAN-NEXT: [[GUARD_F:%.*]] = phi i1 [ false, [[A_TARGET_B]] ], [ false, [[C_TARGET_D]] ], [ true, [[E_TARGET_F]] ], [ false, [[G_TARGET_H]] ], [ false, [[I_TARGET_L]] ]
+; BOOLEAN-NEXT: [[GUARD_H:%.*]] = phi i1 [ false, [[A_TARGET_B]] ], [ false, [[C_TARGET_D]] ], [ false, [[E_TARGET_F]] ], [ true, [[G_TARGET_H]] ], [ false, [[I_TARGET_L]] ]
+; BOOLEAN-NEXT: br i1 [[GUARD_B]], label [[B:%.*]], label [[LOOP_EXIT_GUARD1:%.*]]
+; BOOLEAN: loop.exit.guard1:
+; BOOLEAN-NEXT: br i1 [[GUARD_D]], label [[D:%.*]], label [[LOOP_EXIT_GUARD2:%.*]]
+; BOOLEAN: loop.exit.guard2:
+; BOOLEAN-NEXT: br i1 [[GUARD_F]], label [[F:%.*]], label [[LOOP_EXIT_GUARD3:%.*]]
+; BOOLEAN: loop.exit.guard3:
+; BOOLEAN-NEXT: br i1 [[GUARD_H]], label [[H:%.*]], label [[L]]
+;
+entry:
+ br i1 %PredEntry, label %A, label %L
+
+A:
+ %inc1 = phi i32 [ 0, %entry ], [ %inc2, %I ]
+ callbr void asm "", "r,!i"(i1 %PredA) to label %B [label %C]
+
+B:
+ tail call fastcc void @check(i32 1) #0
+ br label %J
+
+C:
+ callbr void asm "", "r,!i"(i1 %PredB) to label %D [label %E]
+
+D:
+ tail call fastcc void @check(i32 2) #0
+ br label %J
+
+E:
+ callbr void asm "", "r,!i"(i1 %PredC) to label %F [label %G]
+
+F:
+ tail call fastcc void @check(i32 3) #0
+ br label %K
+
+G:
+ callbr void asm "", "r,!i"(i1 %PredD) to label %H [label %I]
+
+H:
+ tail call fastcc void @check(i32 4) #0
+ br label %K
+
+I:
+ %inc2 = add i32 %inc1, 1
+ %cmp = icmp ult i32 %inc2, 10
+ callbr void asm "", "r,!i"(i1 %cmp) to label %A [label %L]
+
+J:
+ br label %L
+
+K:
+ br label %L
+
+L:
+ ret void
+}
+
declare void @check(i32 noundef %i) #0
diff --git a/llvm/test/Transforms/UnifyLoopExits/nested.ll b/llvm/test/Transforms/UnifyLoopExits/nested.ll
index 8fae2c4..2ec576a 100644
--- a/llvm/test/Transforms/UnifyLoopExits/nested.ll
+++ b/llvm/test/Transforms/UnifyLoopExits/nested.ll
@@ -78,3 +78,145 @@ exit:
%exit.phi = phi i32 [%A4.phi, %A5], [%Z, %C]
ret void
}
+
+define void @nested_callbr(i1 %PredB3, i1 %PredB4, i1 %PredA4, i1 %PredA3, i32 %X, i32 %Y, i32 %Z) {
+; CHECK-LABEL: @nested_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[A1:%.*]]
+; CHECK: A1:
+; CHECK-NEXT: br label [[B1:%.*]]
+; CHECK: B1:
+; CHECK-NEXT: br label [[B2:%.*]]
+; CHECK: B2:
+; CHECK-NEXT: [[X_INC:%.*]] = add i32 [[X:%.*]], 1
+; CHECK-NEXT: br label [[B3:%.*]]
+; CHECK: B3:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB3:%.*]])
+; CHECK-NEXT: to label [[B4:%.*]] [label %B3.target.A3]
+; CHECK: B4:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDB4:%.*]])
+; CHECK-NEXT: to label [[B1]] [label %B4.target.A2]
+; CHECK: A2:
+; CHECK-NEXT: br label [[A4:%.*]]
+; CHECK: A3:
+; CHECK-NEXT: br label [[A4]]
+; CHECK: A4:
+; CHECK-NEXT: [[A4_PHI:%.*]] = phi i32 [ [[Y:%.*]], [[A3:%.*]] ], [ [[X_INC_MOVED:%.*]], [[A2:%.*]] ]
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA4:%.*]])
+; CHECK-NEXT: to label [[A4_TARGET_C:%.*]] [label %A5]
+; CHECK: A5:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[PREDA3:%.*]])
+; CHECK-NEXT: to label [[A5_TARGET_EXIT:%.*]] [label %A1]
+; CHECK: C:
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: [[EXIT_PHI:%.*]] = phi i32 [ [[Z:%.*]], [[C:%.*]] ], [ [[EXIT_PHI_MOVED:%.*]], [[LOOP_EXIT_GUARD:%.*]] ]
+; CHECK-NEXT: ret void
+; CHECK: A4.target.C:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: A5.target.exit:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: loop.exit.guard:
+; CHECK-NEXT: [[EXIT_PHI_MOVED]] = phi i32 [ poison, [[A4_TARGET_C]] ], [ [[A4_PHI]], [[A5_TARGET_EXIT]] ]
+; CHECK-NEXT: [[GUARD_C:%.*]] = phi i1 [ true, [[A4_TARGET_C]] ], [ false, [[A5_TARGET_EXIT]] ]
+; CHECK-NEXT: br i1 [[GUARD_C]], label [[C]], label [[EXIT]]
+; CHECK: B3.target.A3:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD1:%.*]]
+; CHECK: B4.target.A2:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD1]]
+; CHECK: loop.exit.guard1:
+; CHECK-NEXT: [[X_INC_MOVED]] = phi i32 [ [[X_INC]], [[B3_TARGET_A3:%.*]] ], [ [[X_INC]], [[B4_TARGET_A2:%.*]] ]
+; CHECK-NEXT: [[GUARD_A3:%.*]] = phi i1 [ true, [[B3_TARGET_A3]] ], [ false, [[B4_TARGET_A2]] ]
+; CHECK-NEXT: br i1 [[GUARD_A3]], label [[A3]], label [[A2]]
+;
+entry:
+ br label %A1
+
+A1:
+ br label %B1
+
+B1:
+ br label %B2
+
+B2:
+ %X.inc = add i32 %X, 1
+ br label %B3
+
+B3:
+ callbr void asm "", "r,!i"(i1 %PredB3) to label %B4 [label %A3]
+
+B4:
+ callbr void asm "", "r,!i"(i1 %PredB4) to label %B1 [label %A2]
+
+A2:
+ br label %A4
+
+A3:
+ br label %A4
+
+A4:
+ %A4.phi = phi i32 [%Y, %A3], [%X.inc, %A2]
+ callbr void asm "", "r,!i"(i1 %PredA4) to label %C [label %A5]
+
+A5:
+ callbr void asm "", "r,!i"(i1 %PredA3) to label %exit [label %A1]
+
+C:
+ br label %exit
+
+exit:
+ %exit.phi = phi i32 [%A4.phi, %A5], [%Z, %C]
+ ret void
+}
+
+; Here, the newly created target loop that connects b to r1 needs to be part of
+; the parent loop (the outer loop b participates in). Otherwise, it will be
+; regarded as an additional loop entry point to this outer loop.
+define void @nested_callbr_multiple_exits() {
+; CHECK-LABEL: @nested_callbr_multiple_exits(
+; CHECK-NEXT: br label [[A:%.*]]
+; CHECK: a:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[B:%.*]] []
+; CHECK: b:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label [[C:%.*]] [label %b.target.b.target.r1]
+; CHECK: c:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label [[C_TARGET_E:%.*]] [label %b]
+; CHECK: e:
+; CHECK-NEXT: callbr void asm "", "!i"()
+; CHECK-NEXT: to label [[A]] [label %e.target.r2]
+; CHECK: r1:
+; CHECK-NEXT: ret void
+; CHECK: r2:
+; CHECK-NEXT: ret void
+; CHECK: b.target.r1:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]]
+; CHECK: e.target.r2:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: loop.exit.guard:
+; CHECK-NEXT: [[GUARD_R1:%.*]] = phi i1 [ true, [[B_TARGET_R1:%.*]] ], [ false, [[E_TARGET_R2:%.*]] ]
+; CHECK-NEXT: br i1 [[GUARD_R1]], label [[R1:%.*]], label [[R2:%.*]]
+; CHECK: b.target.b.target.r1:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD1:%.*]]
+; CHECK: c.target.e:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD1]]
+; CHECK: loop.exit.guard1:
+; CHECK-NEXT: [[GUARD_B_TARGET_R1:%.*]] = phi i1 [ true, [[B_TARGET_B_TARGET_R1:%.*]] ], [ false, [[C_TARGET_E]] ]
+; CHECK-NEXT: br i1 [[GUARD_B_TARGET_R1]], label [[B_TARGET_R1]], label [[E:%.*]]
+;
+ br label %a
+a:
+ callbr void asm "", ""() to label %b []
+b:
+ callbr void asm "", "!i"() to label %c [label %r1]
+c:
+ callbr void asm "", "!i"() to label %e [label %b]
+e:
+ callbr void asm "", "!i"() to label %a [label %r2]
+r1:
+ ret void
+r2:
+ ret void
+}
diff --git a/llvm/test/Transforms/UnifyLoopExits/restore-ssa.ll b/llvm/test/Transforms/UnifyLoopExits/restore-ssa.ll
index 3e68df3..ffe8026 100644
--- a/llvm/test/Transforms/UnifyLoopExits/restore-ssa.ll
+++ b/llvm/test/Transforms/UnifyLoopExits/restore-ssa.ll
@@ -57,6 +57,60 @@ return:
ret i32 %phi
}
+define i32 @exiting-used-in-exit_callbr(ptr %arg1, ptr %arg2) local_unnamed_addr align 2 {
+; CHECK-LABEL: @exiting-used-in-exit_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[A:%.*]] []
+; CHECK: A:
+; CHECK-NEXT: [[MYTMP42:%.*]] = load i32, ptr [[ARG1:%.*]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[MYTMP42]], 0
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP1]])
+; CHECK-NEXT: to label [[B:%.*]] [label %A.target.return]
+; CHECK: B:
+; CHECK-NEXT: [[MYTMP41:%.*]] = load i32, ptr [[ARG2:%.*]], align 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[MYTMP41]], 0
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]])
+; CHECK-NEXT: to label [[A]] [label %B.target.C]
+; CHECK: C:
+; CHECK-NEXT: [[INC:%.*]] = add i32 [[MYTMP41_MOVED:%.*]], 1
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[RETURN:%.*]] []
+; CHECK: return:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[INC]], [[C:%.*]] ], [ [[PHI_MOVED:%.*]], [[LOOP_EXIT_GUARD:%.*]] ]
+; CHECK-NEXT: ret i32 [[PHI]]
+; CHECK: A.target.return:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: B.target.C:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: loop.exit.guard:
+; CHECK-NEXT: [[MYTMP41_MOVED]] = phi i32 [ poison, [[A_TARGET_RETURN:%.*]] ], [ [[MYTMP41]], [[B_TARGET_C:%.*]] ]
+; CHECK-NEXT: [[PHI_MOVED]] = phi i32 [ [[MYTMP42]], [[A_TARGET_RETURN]] ], [ poison, [[B_TARGET_C]] ]
+; CHECK-NEXT: [[GUARD_RETURN:%.*]] = phi i1 [ true, [[A_TARGET_RETURN]] ], [ false, [[B_TARGET_C]] ]
+; CHECK-NEXT: br i1 [[GUARD_RETURN]], label [[RETURN]], label [[C]]
+;
+entry:
+ callbr void asm "", ""() to label %A []
+
+A:
+ %mytmp42 = load i32, ptr %arg1, align 4
+ %cmp1 = icmp slt i32 %mytmp42, 0
+ callbr void asm "", "r,!i"(i1 %cmp1) to label %B [label %return]
+
+B:
+ %mytmp41 = load i32, ptr %arg2, align 4
+ %cmp = icmp slt i32 %mytmp41, 0
+ callbr void asm "", "r,!i"(i1 %cmp) to label %A [label %C]
+
+C:
+ %inc = add i32 %mytmp41, 1
+ callbr void asm "", ""() to label %return []
+
+return:
+ %phi = phi i32 [ %inc, %C ], [ %mytmp42, %A ]
+ ret i32 %phi
+}
+
; Loop consists of A, B and C:
; - A is the header
; - A and C are exiting blocks
@@ -112,6 +166,63 @@ return:
ret i32 0
}
+define i32 @internal-used-in-exit_callbr(ptr %arg1, ptr %arg2) local_unnamed_addr align 2 {
+; CHECK-LABEL: @internal-used-in-exit_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MYTMP42:%.*]] = load i32, ptr [[ARG1:%.*]], align 4
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[A:%.*]] []
+; CHECK: A:
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[MYTMP42]], 0
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP1]])
+; CHECK-NEXT: to label [[B:%.*]] [label %A.target.return]
+; CHECK: B:
+; CHECK-NEXT: [[MYTMP41:%.*]] = load i32, ptr [[ARG2:%.*]], align 4
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[C:%.*]] []
+; CHECK: C:
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[MYTMP42]], 0
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]])
+; CHECK-NEXT: to label [[A]] [label %C.target.D]
+; CHECK: D:
+; CHECK-NEXT: [[INC:%.*]] = add i32 [[MYTMP41_MOVED:%.*]], 1
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[RETURN:%.*]] []
+; CHECK: return:
+; CHECK-NEXT: ret i32 0
+; CHECK: A.target.return:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]]
+; CHECK: C.target.D:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: loop.exit.guard:
+; CHECK-NEXT: [[MYTMP41_MOVED]] = phi i32 [ poison, [[A_TARGET_RETURN:%.*]] ], [ [[MYTMP41]], [[C_TARGET_D:%.*]] ]
+; CHECK-NEXT: [[GUARD_RETURN:%.*]] = phi i1 [ true, [[A_TARGET_RETURN]] ], [ false, [[C_TARGET_D]] ]
+; CHECK-NEXT: br i1 [[GUARD_RETURN]], label [[RETURN]], label [[D:%.*]]
+;
+entry:
+ %mytmp42 = load i32, ptr %arg1, align 4
+ callbr void asm "", ""() to label %A []
+
+A:
+ %cmp1 = icmp slt i32 %mytmp42, 0
+ callbr void asm "", "r,!i"(i1 %cmp1) to label %B [label %return]
+
+B:
+ %mytmp41 = load i32, ptr %arg2, align 4
+ callbr void asm "", ""() to label %C []
+
+C:
+ %cmp = icmp slt i32 %mytmp42, 0
+ callbr void asm "", "r,!i"(i1 %cmp) to label %A [label %D]
+
+D:
+ %inc = add i32 %mytmp41, 1
+ callbr void asm "", ""() to label %return []
+
+return:
+ ret i32 0
+}
+
; Loop consists of A, B and C:
; - A is the header
; - A and C are exiting blocks
@@ -172,6 +283,68 @@ return:
ret i32 %phi
}
+define i32 @mixed-use-in-exit_callbr(ptr %arg1, ptr %arg2) local_unnamed_addr align 2 {
+; CHECK-LABEL: @mixed-use-in-exit_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MYTMP42:%.*]] = load i32, ptr [[ARG1:%.*]], align 4
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[MYTMP42]], 0
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP2]])
+; CHECK-NEXT: to label [[A:%.*]] [label %return]
+; CHECK: A:
+; CHECK-NEXT: [[MYTMP43:%.*]] = add i32 [[MYTMP42]], 1
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[MYTMP42]], 0
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP1]])
+; CHECK-NEXT: to label [[B:%.*]] [label %A.target.return]
+; CHECK: B:
+; CHECK-NEXT: [[MYTMP41:%.*]] = load i32, ptr [[ARG2:%.*]], align 4
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[C:%.*]] []
+; CHECK: C:
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[MYTMP42]], 0
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]])
+; CHECK-NEXT: to label [[A]] [label %C.target.D]
+; CHECK: D:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[RETURN:%.*]] []
+; CHECK: return:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[MYTMP41_MOVED:%.*]], [[D:%.*]] ], [ [[MYTMP42]], [[ENTRY:%.*]] ], [ [[PHI_MOVED:%.*]], [[LOOP_EXIT_GUARD:%.*]] ]
+; CHECK-NEXT: ret i32 [[PHI]]
+; CHECK: A.target.return:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: C.target.D:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: loop.exit.guard:
+; CHECK-NEXT: [[MYTMP41_MOVED]] = phi i32 [ poison, [[A_TARGET_RETURN:%.*]] ], [ [[MYTMP41]], [[C_TARGET_D:%.*]] ]
+; CHECK-NEXT: [[PHI_MOVED]] = phi i32 [ [[MYTMP43]], [[A_TARGET_RETURN]] ], [ poison, [[C_TARGET_D]] ]
+; CHECK-NEXT: [[GUARD_RETURN:%.*]] = phi i1 [ true, [[A_TARGET_RETURN]] ], [ false, [[C_TARGET_D]] ]
+; CHECK-NEXT: br i1 [[GUARD_RETURN]], label [[RETURN]], label [[D]]
+;
+entry:
+ %mytmp42 = load i32, ptr %arg1, align 4
+ %cmp2 = icmp slt i32 %mytmp42, 0
+ callbr void asm "", "r,!i"(i1 %cmp2) to label %A [label %return]
+
+A:
+ %mytmp43 = add i32 %mytmp42, 1
+ %cmp1 = icmp slt i32 %mytmp42, 0
+ callbr void asm "", "r,!i"(i1 %cmp1) to label %B [label %return]
+
+B:
+ %mytmp41 = load i32, ptr %arg2, align 4
+ callbr void asm "", ""() to label %C []
+
+C:
+ %cmp = icmp slt i32 %mytmp42, 0
+ callbr void asm "", "r,!i"(i1 %cmp) to label %A [label %D]
+
+D:
+ callbr void asm "", ""() to label %return []
+
+return:
+ %phi = phi i32 [ %mytmp41, %D ], [ %mytmp43, %A ], [%mytmp42, %entry]
+ ret i32 %phi
+}
+
; Loop consists of A, B and C:
; - A is the header
; - A and C are exiting blocks
@@ -236,3 +409,66 @@ return:
%phi = phi i32 [ %mytmp41, %D ], [ %mytmp42, %E ]
ret i32 %phi
}
+
+define i32 @phi-via-external-block_callbr(ptr %arg1, ptr %arg2) local_unnamed_addr align 2 {
+; CHECK-LABEL: @phi-via-external-block_callbr(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[MYTMP42:%.*]] = load i32, ptr [[ARG1:%.*]], align 4
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[A:%.*]] []
+; CHECK: A:
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[MYTMP42]], 0
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP1]])
+; CHECK-NEXT: to label [[B:%.*]] [label %A.target.E]
+; CHECK: B:
+; CHECK-NEXT: [[MYTMP41:%.*]] = load i32, ptr [[ARG2:%.*]], align 4
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[C:%.*]] []
+; CHECK: C:
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[MYTMP42]], 0
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[CMP]])
+; CHECK-NEXT: to label [[A]] [label %C.target.D]
+; CHECK: D:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[RETURN:%.*]] []
+; CHECK: E:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label [[RETURN]] []
+; CHECK: return:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[MYTMP41_MOVED:%.*]], [[D:%.*]] ], [ [[MYTMP42]], [[E:%.*]] ]
+; CHECK-NEXT: ret i32 [[PHI]]
+; CHECK: A.target.E:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD:%.*]]
+; CHECK: C.target.D:
+; CHECK-NEXT: br label [[LOOP_EXIT_GUARD]]
+; CHECK: loop.exit.guard:
+; CHECK-NEXT: [[MYTMP41_MOVED]] = phi i32 [ poison, [[A_TARGET_E:%.*]] ], [ [[MYTMP41]], [[C_TARGET_D:%.*]] ]
+; CHECK-NEXT: [[GUARD_E:%.*]] = phi i1 [ true, [[A_TARGET_E]] ], [ false, [[C_TARGET_D]] ]
+; CHECK-NEXT: br i1 [[GUARD_E]], label [[E]], label [[D]]
+;
+entry:
+ %mytmp42 = load i32, ptr %arg1, align 4
+ callbr void asm "", ""() to label %A []
+
+A:
+ %cmp1 = icmp slt i32 %mytmp42, 0
+ callbr void asm "", "r,!i"(i1 %cmp1) to label %B [label %E]
+
+B:
+ %mytmp41 = load i32, ptr %arg2, align 4
+ callbr void asm "", ""() to label %C []
+
+C:
+ %cmp = icmp slt i32 %mytmp42, 0
+ callbr void asm "", "r,!i"(i1 %cmp) to label %A [label %D]
+
+D:
+ callbr void asm "", ""() to label %return []
+
+E:
+ callbr void asm "", ""() to label %return []
+
+return:
+ %phi = phi i32 [ %mytmp41, %D ], [ %mytmp42, %E ]
+ ret i32 %phi
+}
diff --git a/llvm/test/Transforms/UnifyLoopExits/undef-phis.ll b/llvm/test/Transforms/UnifyLoopExits/undef-phis.ll
index 05f50fc..e65e254 100644
--- a/llvm/test/Transforms/UnifyLoopExits/undef-phis.ll
+++ b/llvm/test/Transforms/UnifyLoopExits/undef-phis.ll
@@ -56,3 +56,71 @@ mbb5291: ; preds = %mbb4321
store volatile [2 x i32] %i5293, ptr addrspace(5) null, align 4
ret void
}
+
+define fastcc void @undef_phi_callbr(i64 %i5247, i1 %i4530, i1 %i4936.not) {
+; CHECK-LABEL: define fastcc void @undef_phi_callbr(
+; CHECK-SAME: i64 [[I5247:%.*]], i1 [[I4530:%.*]], i1 [[I4936_NOT:%.*]]) {
+; CHECK-NEXT: [[MBB:.*:]]
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label %[[MBB3932:.*]] []
+; CHECK: [[MBB3932]]:
+; CHECK-NEXT: callbr void asm "", ""()
+; CHECK-NEXT: to label %[[MBB4454:.*]] []
+; CHECK: [[MBB4321:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[I5247]] to i32
+; CHECK-NEXT: [[I5290:%.*]] = icmp eq i32 [[TMP0]], 0
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[I5290]])
+; CHECK-NEXT: to label %[[MBB3932]] [label %mbb4321.target.mbb5291]
+; CHECK: [[MBB4454]]:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[I4530]])
+; CHECK-NEXT: to label %[[MBB4535:.*]] [label %mbb4454.target.mbb4454.target.mbb4531]
+; CHECK: [[MBB4531:.*]]:
+; CHECK-NEXT: ret void
+; CHECK: [[MBB4535]]:
+; CHECK-NEXT: callbr void asm "", "r,!i"(i1 [[I4936_NOT]])
+; CHECK-NEXT: to label %[[MBB4535_TARGET_MBB4321:.*]] [label %mbb4454]
+; CHECK: [[MBB5291:.*]]:
+; CHECK-NEXT: [[I5293:%.*]] = insertvalue [2 x i32] zeroinitializer, i32 [[DOTMOVED:%.*]], 1
+; CHECK-NEXT: store volatile [2 x i32] [[I5293]], ptr addrspace(5) null, align 4
+; CHECK-NEXT: ret void
+; CHECK: [[MBB4454_TARGET_MBB4531:.*]]:
+; CHECK-NEXT: br label %[[LOOP_EXIT_GUARD:.*]]
+; CHECK: [[MBB4321_TARGET_MBB5291:.*]]:
+; CHECK-NEXT: br label %[[LOOP_EXIT_GUARD]]
+; CHECK: [[LOOP_EXIT_GUARD]]:
+; CHECK-NEXT: [[DOTMOVED]] = phi i32 [ poison, %[[MBB4454_TARGET_MBB4531]] ], [ [[TMP0]], %[[MBB4321_TARGET_MBB5291]] ]
+; CHECK-NEXT: [[GUARD_MBB4531:%.*]] = phi i1 [ true, %[[MBB4454_TARGET_MBB4531]] ], [ false, %[[MBB4321_TARGET_MBB5291]] ]
+; CHECK-NEXT: br i1 [[GUARD_MBB4531]], label %[[MBB4531]], label %[[MBB5291]]
+; CHECK: [[MBB4454_TARGET_MBB4454_TARGET_MBB4531:.*]]:
+; CHECK-NEXT: br label %[[LOOP_EXIT_GUARD1:.*]]
+; CHECK: [[MBB4535_TARGET_MBB4321]]:
+; CHECK-NEXT: br label %[[LOOP_EXIT_GUARD1]]
+; CHECK: [[LOOP_EXIT_GUARD1]]:
+; CHECK-NEXT: [[GUARD_MBB4454_TARGET_MBB4531:%.*]] = phi i1 [ true, %[[MBB4454_TARGET_MBB4454_TARGET_MBB4531]] ], [ false, %[[MBB4535_TARGET_MBB4321]] ]
+; CHECK-NEXT: br i1 [[GUARD_MBB4454_TARGET_MBB4531]], label %[[MBB4454_TARGET_MBB4531]], label %[[MBB4321]]
+;
+mbb:
+ callbr void asm "", ""() to label %mbb3932 []
+
+mbb3932: ; preds = %mbb4321, %mbb
+ callbr void asm "", ""() to label %mbb4454 []
+
+mbb4321: ; preds = %mbb4535
+ %0 = trunc i64 %i5247 to i32
+ %i5290 = icmp eq i32 %0, 0
+ callbr void asm "", "r,!i"(i1 %i5290) to label %mbb3932 [label %mbb5291]
+
+mbb4454: ; preds = %mbb4535, %mbb3932
+ callbr void asm "", "r,!i"(i1 %i4530) to label %mbb4535 [label %mbb4531]
+
+mbb4531: ; preds = %mbb4454
+ ret void
+
+mbb4535: ; preds = %mbb4454
+ callbr void asm "", "r,!i"(i1 %i4936.not) to label %mbb4321 [label %mbb4454]
+
+mbb5291: ; preds = %mbb4321
+ %i5293 = insertvalue [2 x i32] zeroinitializer, i32 %0, 1
+ store volatile [2 x i32] %i5293, ptr addrspace(5) null, align 4
+ ret void
+}
diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py
index 781240a..11a5a57 100644
--- a/llvm/test/lit.cfg.py
+++ b/llvm/test/lit.cfg.py
@@ -753,10 +753,17 @@ if not hasattr(sys, "getwindowsversion") or sys.getwindowsversion().build >= 170
config.available_features.add("unix-sockets")
# .debug_frame is not emitted for targeting Windows x64, aarch64/arm64, AIX, or Apple Silicon Mac.
-if not re.match(
- r"^(x86_64|aarch64|arm64|powerpc|powerpc64).*-(windows-cygnus|windows-gnu|windows-msvc|aix)",
- config.target_triple,
-) and not re.match(r"^arm64(e)?-apple-(macos|darwin)", config.target_triple):
+if (
+ not re.match(
+ r"^(x86_64|aarch64|arm64|powerpc|powerpc64).*-(windows-cygnus|windows-gnu|windows-msvc|aix)",
+ config.target_triple,
+ )
+ and not re.match(
+ r"^arm64(e)?-apple-(macos|darwin)",
+ config.target_triple,
+ )
+ and not re.match(r".*-zos.*", config.target_triple)
+):
config.available_features.add("debug_frame")
if config.enable_backtrace:
diff --git a/llvm/unittests/ADT/TypeSwitchTest.cpp b/llvm/unittests/ADT/TypeSwitchTest.cpp
index a7d9342..b801228 100644
--- a/llvm/unittests/ADT/TypeSwitchTest.cpp
+++ b/llvm/unittests/ADT/TypeSwitchTest.cpp
@@ -142,3 +142,44 @@ TEST(TypeSwitchTest, DefaultUnreachableWithVoid) {
EXPECT_DEATH((void)translate(DerivedD()), "Unhandled type");
#endif
}
+
+TEST(TypeSwitchTest, DefaultNullopt) {
+ auto translate = [](auto value) {
+ return TypeSwitch<Base *, std::optional<int>>(&value)
+ .Case([](DerivedA *) { return 0; })
+ .Default(std::nullopt);
+ };
+ EXPECT_EQ(0, translate(DerivedA()));
+ EXPECT_EQ(std::nullopt, translate(DerivedD()));
+}
+
+TEST(TypeSwitchTest, DefaultNullptr) {
+ float foo = 0.0f;
+ auto translate = [&](auto value) {
+ return TypeSwitch<Base *, float *>(&value)
+ .Case([&](DerivedA *) { return &foo; })
+ .Default(nullptr);
+ };
+ EXPECT_EQ(&foo, translate(DerivedA()));
+ EXPECT_EQ(nullptr, translate(DerivedD()));
+}
+
+TEST(TypeSwitchTest, DefaultNullptrForPointerLike) {
+ struct Value {
+ void *ptr;
+ Value(const Value &other) : ptr(other.ptr) {}
+ Value(std::nullptr_t) : ptr(nullptr) {}
+ Value() : Value(nullptr) {}
+ };
+
+ float foo = 0.0f;
+ Value fooVal;
+ fooVal.ptr = &foo;
+ auto translate = [&](auto value) {
+ return TypeSwitch<Base *, Value>(&value)
+ .Case([&](DerivedA *) { return fooVal; })
+ .Default(nullptr);
+ };
+ EXPECT_EQ(&foo, translate(DerivedA()).ptr);
+ EXPECT_EQ(nullptr, translate(DerivedD()).ptr);
+}
diff --git a/llvm/utils/UpdateTestChecks/common.py b/llvm/utils/UpdateTestChecks/common.py
index 8cd200c9..b6b80ea 100644
--- a/llvm/utils/UpdateTestChecks/common.py
+++ b/llvm/utils/UpdateTestChecks/common.py
@@ -2396,244 +2396,6 @@ def add_analyze_checks(
)
-IR_FUNC_NAME_RE = re.compile(
- r"^\s*define\s+(?:internal\s+)?[^@]*@(?P<func>[A-Za-z0-9_.]+)\s*\("
-)
-IR_PREFIX_DATA_RE = re.compile(r"^ *(;|$)")
-MIR_FUNC_NAME_RE = re.compile(r" *name: *(?P<func>[A-Za-z0-9_.-]+)")
-MIR_BODY_BEGIN_RE = re.compile(r" *body: *\|")
-MIR_BASIC_BLOCK_RE = re.compile(r" *bb\.[0-9]+.*:$")
-MIR_PREFIX_DATA_RE = re.compile(r"^ *(;|bb.[0-9].*: *$|[a-z]+:( |$)|$)")
-
-
-def find_mir_functions_with_one_bb(lines, verbose=False):
- result = []
- cur_func = None
- bbs = 0
- for line in lines:
- m = MIR_FUNC_NAME_RE.match(line)
- if m:
- if bbs == 1:
- result.append(cur_func)
- cur_func = m.group("func")
- bbs = 0
- m = MIR_BASIC_BLOCK_RE.match(line)
- if m:
- bbs += 1
- if bbs == 1:
- result.append(cur_func)
- return result
-
-
-def add_mir_checks_for_function(
- test,
- output_lines,
- run_list,
- func_dict,
- func_name,
- single_bb,
- print_fixed_stack,
- first_check_is_next,
- at_the_function_name,
-):
- printed_prefixes = set()
- for run in run_list:
- for prefix in run[0]:
- if prefix in printed_prefixes:
- break
- if not func_dict[prefix][func_name]:
- continue
- if printed_prefixes:
- # Add some space between different check prefixes.
- indent = len(output_lines[-1]) - len(output_lines[-1].lstrip(" "))
- output_lines.append(" " * indent + ";")
- printed_prefixes.add(prefix)
- add_mir_check_lines(
- test,
- output_lines,
- prefix,
- ("@" if at_the_function_name else "") + func_name,
- single_bb,
- func_dict[prefix][func_name],
- print_fixed_stack,
- first_check_is_next,
- )
- break
- else:
- warn(
- "Found conflicting asm for function: {}".format(func_name),
- test_file=test,
- )
- return output_lines
-
-
-def add_mir_check_lines(
- test,
- output_lines,
- prefix,
- func_name,
- single_bb,
- func_info,
- print_fixed_stack,
- first_check_is_next,
-):
- func_body = str(func_info).splitlines()
- if single_bb:
- # Don't bother checking the basic block label for a single BB
- func_body.pop(0)
-
- if not func_body:
- warn(
- "Function has no instructions to check: {}".format(func_name),
- test_file=test,
- )
- return
-
- first_line = func_body[0]
- indent = len(first_line) - len(first_line.lstrip(" "))
- # A check comment, indented the appropriate amount
- check = "{:>{}}; {}".format("", indent, prefix)
-
- output_lines.append("{}-LABEL: name: {}".format(check, func_name))
-
- if print_fixed_stack:
- output_lines.append("{}: fixedStack:".format(check))
- for stack_line in func_info.extrascrub.splitlines():
- filecheck_directive = check + "-NEXT"
- output_lines.append("{}: {}".format(filecheck_directive, stack_line))
-
- first_check = not first_check_is_next
- for func_line in func_body:
- if not func_line.strip():
- # The mir printer prints leading whitespace so we can't use CHECK-EMPTY:
- output_lines.append(check + "-NEXT: {{" + func_line + "$}}")
- continue
- filecheck_directive = check if first_check else check + "-NEXT"
- first_check = False
- check_line = "{}: {}".format(filecheck_directive, func_line[indent:]).rstrip()
- output_lines.append(check_line)
-
-
-def should_add_mir_line_to_output(input_line, prefix_set):
- # Skip any check lines that we're handling as well as comments
- m = CHECK_RE.match(input_line)
- if (m and m.group(1) in prefix_set) or input_line.strip() == ";":
- return False
- return True
-
-
-def add_mir_checks(
- input_lines,
- prefix_set,
- autogenerated_note,
- test,
- run_list,
- func_dict,
- print_fixed_stack,
- first_check_is_next,
- at_the_function_name,
-):
- simple_functions = find_mir_functions_with_one_bb(input_lines)
-
- output_lines = []
- output_lines.append(autogenerated_note)
-
- func_name = None
- state = "toplevel"
- for input_line in input_lines:
- if input_line == autogenerated_note:
- continue
-
- if state == "toplevel":
- m = IR_FUNC_NAME_RE.match(input_line)
- if m:
- state = "ir function prefix"
- func_name = m.group("func")
- if input_line.rstrip("| \r\n") == "---":
- state = "document"
- output_lines.append(input_line)
- elif state == "document":
- m = MIR_FUNC_NAME_RE.match(input_line)
- if m:
- state = "mir function metadata"
- func_name = m.group("func")
- if input_line.strip() == "...":
- state = "toplevel"
- func_name = None
- if should_add_mir_line_to_output(input_line, prefix_set):
- output_lines.append(input_line)
- elif state == "mir function metadata":
- if should_add_mir_line_to_output(input_line, prefix_set):
- output_lines.append(input_line)
- m = MIR_BODY_BEGIN_RE.match(input_line)
- if m:
- if func_name in simple_functions:
- # If there's only one block, put the checks inside it
- state = "mir function prefix"
- continue
- state = "mir function body"
- add_mir_checks_for_function(
- test,
- output_lines,
- run_list,
- func_dict,
- func_name,
- single_bb=False,
- print_fixed_stack=print_fixed_stack,
- first_check_is_next=first_check_is_next,
- at_the_function_name=at_the_function_name,
- )
- elif state == "mir function prefix":
- m = MIR_PREFIX_DATA_RE.match(input_line)
- if not m:
- state = "mir function body"
- add_mir_checks_for_function(
- test,
- output_lines,
- run_list,
- func_dict,
- func_name,
- single_bb=True,
- print_fixed_stack=print_fixed_stack,
- first_check_is_next=first_check_is_next,
- at_the_function_name=at_the_function_name,
- )
-
- if should_add_mir_line_to_output(input_line, prefix_set):
- output_lines.append(input_line)
- elif state == "mir function body":
- if input_line.strip() == "...":
- state = "toplevel"
- func_name = None
- if should_add_mir_line_to_output(input_line, prefix_set):
- output_lines.append(input_line)
- elif state == "ir function prefix":
- m = IR_PREFIX_DATA_RE.match(input_line)
- if not m:
- state = "ir function body"
- add_mir_checks_for_function(
- test,
- output_lines,
- run_list,
- func_dict,
- func_name,
- single_bb=False,
- print_fixed_stack=print_fixed_stack,
- first_check_is_next=first_check_is_next,
- at_the_function_name=at_the_function_name,
- )
-
- if should_add_mir_line_to_output(input_line, prefix_set):
- output_lines.append(input_line)
- elif state == "ir function body":
- if input_line.strip() == "}":
- state = "toplevel"
- func_name = None
- if should_add_mir_line_to_output(input_line, prefix_set):
- output_lines.append(input_line)
- return output_lines
-
-
def build_global_values_dictionary(glob_val_dict, raw_tool_output, prefixes, ginfo):
for nameless_value in ginfo.get_nameless_values():
if nameless_value.global_ir_rhs_regexp is None:
diff --git a/llvm/utils/UpdateTestChecks/mir.py b/llvm/utils/UpdateTestChecks/mir.py
new file mode 100644
index 0000000..24bb8b3
--- /dev/null
+++ b/llvm/utils/UpdateTestChecks/mir.py
@@ -0,0 +1,362 @@
+"""MIR test utility functions for UpdateTestChecks scripts."""
+
+import re
+import sys
+from UpdateTestChecks import common
+from UpdateTestChecks.common import (
+ CHECK_RE,
+ warn,
+)
+
+IR_FUNC_NAME_RE = re.compile(
+ r"^\s*define\s+(?:internal\s+)?[^@]*@(?P<func>[A-Za-z0-9_.]+)\s*\("
+)
+IR_PREFIX_DATA_RE = re.compile(r"^ *(;|$)")
+MIR_FUNC_NAME_RE = re.compile(r" *name: *(?P<func>[A-Za-z0-9_.-]+)")
+MIR_BODY_BEGIN_RE = re.compile(r" *body: *\|")
+MIR_BASIC_BLOCK_RE = re.compile(r" *bb\.[0-9]+.*:$")
+MIR_PREFIX_DATA_RE = re.compile(r"^ *(;|bb.[0-9].*: *$|[a-z]+:( |$)|$)")
+
+VREG_RE = re.compile(r"(%[0-9]+)(?:\.[a-z0-9_]+)?(?::[a-z0-9_]+)?(?:\([<>a-z0-9 ]+\))?")
+MI_FLAGS_STR = (
+ r"(frame-setup |frame-destroy |nnan |ninf |nsz |arcp |contract |afn "
+ r"|reassoc |nuw |nsw |exact |nofpexcept |nomerge |unpredictable "
+ r"|noconvergent |nneg |disjoint |nusw |samesign |inbounds )*"
+)
+VREG_DEF_FLAGS_STR = r"(?:dead |undef )*"
+
+# Pattern to match the defined vregs and the opcode of an instruction that
+# defines vregs. Opcodes starting with a lower-case 't' are allowed to match
+# ARM's thumb instructions, like tADDi8 and t2ADDri.
+VREG_DEF_RE = re.compile(
+ r"^ *(?P<vregs>{2}{0}(?:, {2}{0})*) = "
+ r"{1}(?P<opcode>[A-Zt][A-Za-z0-9_]+)".format(
+ VREG_RE.pattern, MI_FLAGS_STR, VREG_DEF_FLAGS_STR
+ )
+)
+
+MIR_FUNC_RE = re.compile(
+ r"^---$"
+ r"\n"
+ r"^ *name: *(?P<func>[A-Za-z0-9_.-]+)$"
+ r".*?"
+ r"(?:^ *fixedStack: *(\[\])? *\n"
+ r"(?P<fixedStack>.*?)\n?"
+ r"^ *stack:"
+ r".*?)?"
+ r"^ *body: *\|\n"
+ r"(?P<body>.*?)\n"
+ r"^\.\.\.$",
+ flags=(re.M | re.S),
+)
+
+
+def build_function_info_dictionary(
+ test, raw_tool_output, triple, prefixes, func_dict, verbose
+):
+ for m in MIR_FUNC_RE.finditer(raw_tool_output):
+ func = m.group("func")
+ fixedStack = m.group("fixedStack")
+ body = m.group("body")
+ if verbose:
+ print("Processing function: {}".format(func), file=sys.stderr)
+ for l in body.splitlines():
+ print(" {}".format(l), file=sys.stderr)
+
+ # Vreg mangling
+ mangled = []
+ vreg_map = {}
+ for func_line in body.splitlines(keepends=True):
+ m = VREG_DEF_RE.match(func_line)
+ if m:
+ for vreg in VREG_RE.finditer(m.group("vregs")):
+ if vreg.group(1) in vreg_map:
+ name = vreg_map[vreg.group(1)]
+ else:
+ name = mangle_vreg(m.group("opcode"), vreg_map.values())
+ vreg_map[vreg.group(1)] = name
+ func_line = func_line.replace(
+ vreg.group(1), "[[{}:%[0-9]+]]".format(name), 1
+ )
+ for number, name in vreg_map.items():
+ func_line = re.sub(
+ r"{}\b".format(number), "[[{}]]".format(name), func_line
+ )
+ mangled.append(func_line)
+ body = "".join(mangled)
+
+ for prefix in prefixes:
+ info = common.function_body(
+ body, fixedStack, None, None, None, None, ginfo=None
+ )
+ if func in func_dict[prefix]:
+ if (
+ not func_dict[prefix][func]
+ or func_dict[prefix][func].scrub != info.scrub
+ or func_dict[prefix][func].extrascrub != info.extrascrub
+ ):
+ func_dict[prefix][func] = None
+ else:
+ func_dict[prefix][func] = info
+
+
+def mangle_vreg(opcode, current_names):
+ base = opcode
+ # Simplify some common prefixes and suffixes
+ if opcode.startswith("G_"):
+ base = base[len("G_") :]
+ if opcode.endswith("_PSEUDO"):
+ base = base[: len("_PSEUDO")]
+ # Shorten some common opcodes with long-ish names
+ base = dict(
+ IMPLICIT_DEF="DEF",
+ GLOBAL_VALUE="GV",
+ CONSTANT="C",
+ FCONSTANT="C",
+ MERGE_VALUES="MV",
+ UNMERGE_VALUES="UV",
+ INTRINSIC="INT",
+ INTRINSIC_W_SIDE_EFFECTS="INT",
+ INSERT_VECTOR_ELT="IVEC",
+ EXTRACT_VECTOR_ELT="EVEC",
+ SHUFFLE_VECTOR="SHUF",
+ ).get(base, base)
+ # Avoid ambiguity when opcodes end in numbers
+ if len(base.rstrip("0123456789")) < len(base):
+ base += "_"
+
+ i = 0
+ for name in current_names:
+ if name.rstrip("0123456789") == base:
+ i += 1
+ if i:
+ return "{}{}".format(base, i)
+ return base
+
+
+def find_mir_functions_with_one_bb(lines, verbose=False):
+ result = []
+ cur_func = None
+ bbs = 0
+ for line in lines:
+ m = MIR_FUNC_NAME_RE.match(line)
+ if m:
+ if bbs == 1:
+ result.append(cur_func)
+ cur_func = m.group("func")
+ bbs = 0
+ m = MIR_BASIC_BLOCK_RE.match(line)
+ if m:
+ bbs += 1
+ if bbs == 1:
+ result.append(cur_func)
+ return result
+
+
+def add_mir_checks_for_function(
+ test,
+ output_lines,
+ run_list,
+ func_dict,
+ func_name,
+ single_bb,
+ print_fixed_stack,
+ first_check_is_next,
+ at_the_function_name,
+):
+ printed_prefixes = set()
+ for run in run_list:
+ for prefix in run[0]:
+ if prefix in printed_prefixes:
+ break
+ if not func_dict[prefix][func_name]:
+ continue
+ if printed_prefixes:
+ # Add some space between different check prefixes.
+ indent = len(output_lines[-1]) - len(output_lines[-1].lstrip(" "))
+ output_lines.append(" " * indent + ";")
+ printed_prefixes.add(prefix)
+ add_mir_check_lines(
+ test,
+ output_lines,
+ prefix,
+ ("@" if at_the_function_name else "") + func_name,
+ single_bb,
+ func_dict[prefix][func_name],
+ print_fixed_stack,
+ first_check_is_next,
+ )
+ break
+ else:
+ warn(
+ "Found conflicting asm for function: {}".format(func_name),
+ test_file=test,
+ )
+ return output_lines
+
+
+def add_mir_check_lines(
+ test,
+ output_lines,
+ prefix,
+ func_name,
+ single_bb,
+ func_info,
+ print_fixed_stack,
+ first_check_is_next,
+):
+ func_body = str(func_info).splitlines()
+ if single_bb:
+ # Don't bother checking the basic block label for a single BB
+ func_body.pop(0)
+
+ if not func_body:
+ warn(
+ "Function has no instructions to check: {}".format(func_name),
+ test_file=test,
+ )
+ return
+
+ first_line = func_body[0]
+ indent = len(first_line) - len(first_line.lstrip(" "))
+ # A check comment, indented the appropriate amount
+ check = "{:>{}}; {}".format("", indent, prefix)
+
+ output_lines.append("{}-LABEL: name: {}".format(check, func_name))
+
+ if print_fixed_stack:
+ output_lines.append("{}: fixedStack:".format(check))
+ for stack_line in func_info.extrascrub.splitlines():
+ filecheck_directive = check + "-NEXT"
+ output_lines.append("{}: {}".format(filecheck_directive, stack_line))
+
+ first_check = not first_check_is_next
+ for func_line in func_body:
+ if not func_line.strip():
+ # The mir printer prints leading whitespace so we can't use CHECK-EMPTY:
+ output_lines.append(check + "-NEXT: {{" + func_line + "$}}")
+ continue
+ filecheck_directive = check if first_check else check + "-NEXT"
+ first_check = False
+ check_line = "{}: {}".format(filecheck_directive, func_line[indent:]).rstrip()
+ output_lines.append(check_line)
+
+
+def should_add_mir_line_to_output(input_line, prefix_set):
+ # Skip any check lines that we're handling as well as comments
+ m = CHECK_RE.match(input_line)
+ if (m and m.group(1) in prefix_set) or input_line.strip() == ";":
+ return False
+ return True
+
+
+def add_mir_checks(
+ input_lines,
+ prefix_set,
+ autogenerated_note,
+ test,
+ run_list,
+ func_dict,
+ print_fixed_stack,
+ first_check_is_next,
+ at_the_function_name,
+):
+ simple_functions = find_mir_functions_with_one_bb(input_lines)
+
+ output_lines = []
+ output_lines.append(autogenerated_note)
+
+ func_name = None
+ state = "toplevel"
+ for input_line in input_lines:
+ if input_line == autogenerated_note:
+ continue
+
+ if state == "toplevel":
+ m = IR_FUNC_NAME_RE.match(input_line)
+ if m:
+ state = "ir function prefix"
+ func_name = m.group("func")
+ if input_line.rstrip("| \r\n") == "---":
+ state = "document"
+ output_lines.append(input_line)
+ elif state == "document":
+ m = MIR_FUNC_NAME_RE.match(input_line)
+ if m:
+ state = "mir function metadata"
+ func_name = m.group("func")
+ if input_line.strip() == "...":
+ state = "toplevel"
+ func_name = None
+ if should_add_mir_line_to_output(input_line, prefix_set):
+ output_lines.append(input_line)
+ elif state == "mir function metadata":
+ if should_add_mir_line_to_output(input_line, prefix_set):
+ output_lines.append(input_line)
+ m = MIR_BODY_BEGIN_RE.match(input_line)
+ if m:
+ if func_name in simple_functions:
+ # If there's only one block, put the checks inside it
+ state = "mir function prefix"
+ continue
+ state = "mir function body"
+ add_mir_checks_for_function(
+ test,
+ output_lines,
+ run_list,
+ func_dict,
+ func_name,
+ single_bb=False,
+ print_fixed_stack=print_fixed_stack,
+ first_check_is_next=first_check_is_next,
+ at_the_function_name=at_the_function_name,
+ )
+ elif state == "mir function prefix":
+ m = MIR_PREFIX_DATA_RE.match(input_line)
+ if not m:
+ state = "mir function body"
+ add_mir_checks_for_function(
+ test,
+ output_lines,
+ run_list,
+ func_dict,
+ func_name,
+ single_bb=True,
+ print_fixed_stack=print_fixed_stack,
+ first_check_is_next=first_check_is_next,
+ at_the_function_name=at_the_function_name,
+ )
+
+ if should_add_mir_line_to_output(input_line, prefix_set):
+ output_lines.append(input_line)
+ elif state == "mir function body":
+ if input_line.strip() == "...":
+ state = "toplevel"
+ func_name = None
+ if should_add_mir_line_to_output(input_line, prefix_set):
+ output_lines.append(input_line)
+ elif state == "ir function prefix":
+ m = IR_PREFIX_DATA_RE.match(input_line)
+ if not m:
+ state = "ir function body"
+ add_mir_checks_for_function(
+ test,
+ output_lines,
+ run_list,
+ func_dict,
+ func_name,
+ single_bb=False,
+ print_fixed_stack=print_fixed_stack,
+ first_check_is_next=first_check_is_next,
+ at_the_function_name=at_the_function_name,
+ )
+
+ if should_add_mir_line_to_output(input_line, prefix_set):
+ output_lines.append(input_line)
+ elif state == "ir function body":
+ if input_line.strip() == "}":
+ state = "toplevel"
+ func_name = None
+ if should_add_mir_line_to_output(input_line, prefix_set):
+ output_lines.append(input_line)
+ return output_lines
diff --git a/llvm/utils/lit/lit/TestRunner.py b/llvm/utils/lit/lit/TestRunner.py
index 9fba96a..3176b1a 100644
--- a/llvm/utils/lit/lit/TestRunner.py
+++ b/llvm/utils/lit/lit/TestRunner.py
@@ -600,18 +600,33 @@ def executeBuiltinUmask(cmd, shenv):
def executeBuiltinUlimit(cmd, shenv):
"""executeBuiltinUlimit - Change the current limits."""
- if os.name != "posix":
+ try:
+ # Try importing the resource module (available on POSIX systems) and
+ # emit an error where it does not exist (e.g., Windows).
+ import resource
+ except ImportError:
raise InternalShellError(cmd, "'ulimit' not supported on this system")
if len(cmd.args) != 3:
raise InternalShellError(cmd, "'ulimit' requires two arguments")
try:
- new_limit = int(cmd.args[2])
+ if cmd.args[2] == "unlimited":
+ new_limit = resource.RLIM_INFINITY
+ else:
+ new_limit = int(cmd.args[2])
except ValueError as err:
raise InternalShellError(cmd, "Error: 'ulimit': %s" % str(err))
if cmd.args[1] == "-v":
- shenv.ulimit["RLIMIT_AS"] = new_limit * 1024
+ if new_limit != resource.RLIM_INFINITY:
+ new_limit = new_limit * 1024
+ shenv.ulimit["RLIMIT_AS"] = new_limit
elif cmd.args[1] == "-n":
shenv.ulimit["RLIMIT_NOFILE"] = new_limit
+ elif cmd.args[1] == "-s":
+ if new_limit != resource.RLIM_INFINITY:
+ new_limit = new_limit * 1024
+ shenv.ulimit["RLIMIT_STACK"] = new_limit
+ elif cmd.args[1] == "-f":
+ shenv.ulimit["RLIMIT_FSIZE"] = new_limit
else:
raise InternalShellError(
cmd, "'ulimit' does not support option: %s" % cmd.args[1]
@@ -811,6 +826,10 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
not_args = []
not_count = 0
not_crash = False
+
+ # Expand all late substitutions.
+ args = _expandLateSubstitutions(j, args, cmd_shenv.cwd)
+
while True:
if args[0] == "env":
# Create a copy of the global environment and modify it for
@@ -860,9 +879,6 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
# Ensure args[0] is hashable.
args[0] = expand_glob(args[0], cmd_shenv.cwd)[0]
- # Expand all late substitutions.
- args = _expandLateSubstitutions(j, args, cmd_shenv.cwd)
-
inproc_builtin = inproc_builtins.get(args[0], None)
if inproc_builtin and (args[0] != "echo" or len(cmd.commands) == 1):
# env calling an in-process builtin is useless, so we take the safe
diff --git a/llvm/utils/lit/lit/builtin_commands/_launch_with_limit.py b/llvm/utils/lit/lit/builtin_commands/_launch_with_limit.py
index 33d2d59..a9dc259 100644
--- a/llvm/utils/lit/lit/builtin_commands/_launch_with_limit.py
+++ b/llvm/utils/lit/lit/builtin_commands/_launch_with_limit.py
@@ -17,6 +17,10 @@ def main(argv):
resource.setrlimit(resource.RLIMIT_AS, limit)
elif limit_str == "RLIMIT_NOFILE":
resource.setrlimit(resource.RLIMIT_NOFILE, limit)
+ elif limit_str == "RLIMIT_STACK":
+ resource.setrlimit(resource.RLIMIT_STACK, limit)
+ elif limit_str == "RLIMIT_FSIZE":
+ resource.setrlimit(resource.RLIMIT_FSIZE, limit)
process_output = subprocess.run(command_args)
sys.exit(process_output.returncode)
diff --git a/llvm/utils/lit/tests/Inputs/shtest-readfile/env.txt b/llvm/utils/lit/tests/Inputs/shtest-readfile/env.txt
new file mode 100644
index 0000000..3e19373
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/shtest-readfile/env.txt
@@ -0,0 +1,6 @@
+## Tests that readfile works with the env builtin.
+# RUN: echo -n "hello" > %t.1
+# RUN: env TEST=%{readfile:%t.1} %{python} -c "import os; print(os.environ['TEST'])"
+
+## Fail the test so we can assert on the output.
+# RUN: not echo return \ No newline at end of file
diff --git a/llvm/utils/lit/tests/Inputs/shtest-readfile/lit.cfg b/llvm/utils/lit/tests/Inputs/shtest-readfile/lit.cfg
index ee49667..80af27f 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-readfile/lit.cfg
+++ b/llvm/utils/lit/tests/Inputs/shtest-readfile/lit.cfg
@@ -10,6 +10,7 @@ use_lit_shell = lit.util.pythonize_bool(lit_shell_env)
config.test_format = lit.formats.ShTest(execute_external=not use_lit_shell)
config.test_source_root = None
config.test_exec_root = None
+config.substitutions.append(("%{python}", '"%s"' % (sys.executable)))
# If we are testing with the external shell, remove the fake-externals from
# PATH so that we use mkdir in the tests.
diff --git a/llvm/utils/lit/tests/Inputs/shtest-ulimit-nondarwin/ulimit_okay.txt b/llvm/utils/lit/tests/Inputs/shtest-ulimit-nondarwin/ulimit_okay.txt
index dbdd003..a5fac7b 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-ulimit-nondarwin/ulimit_okay.txt
+++ b/llvm/utils/lit/tests/Inputs/shtest-ulimit-nondarwin/ulimit_okay.txt
@@ -1,4 +1,5 @@
# RUN: ulimit -v 1048576
+# RUN: ulimit -s 256
# RUN: %{python} %S/../shtest-ulimit/print_limits.py
# Fail the test so that we can assert on the output.
# RUN: not echo return
diff --git a/llvm/utils/lit/tests/Inputs/shtest-ulimit-nondarwin/ulimit_unlimited.txt b/llvm/utils/lit/tests/Inputs/shtest-ulimit-nondarwin/ulimit_unlimited.txt
new file mode 100644
index 0000000..4c687e3
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/shtest-ulimit-nondarwin/ulimit_unlimited.txt
@@ -0,0 +1,6 @@
+# RUN: ulimit -f 5
+# RUN: %{python} %S/../shtest-ulimit/print_limits.py
+# RUN: ulimit -f unlimited
+# RUN: %{python} %S/../shtest-ulimit/print_limits.py
+# Fail the test so that we can assert on the output.
+# RUN: not echo return
diff --git a/llvm/utils/lit/tests/Inputs/shtest-ulimit/print_limits.py b/llvm/utils/lit/tests/Inputs/shtest-ulimit/print_limits.py
index 632f954..c732c04 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-ulimit/print_limits.py
+++ b/llvm/utils/lit/tests/Inputs/shtest-ulimit/print_limits.py
@@ -2,3 +2,5 @@ import resource
print("RLIMIT_AS=" + str(resource.getrlimit(resource.RLIMIT_AS)[0]))
print("RLIMIT_NOFILE=" + str(resource.getrlimit(resource.RLIMIT_NOFILE)[0]))
+print("RLIMIT_STACK=" + str(resource.getrlimit(resource.RLIMIT_STACK)[0]))
+print("RLIMIT_FSIZE=" + str(resource.getrlimit(resource.RLIMIT_FSIZE)[0]))
diff --git a/llvm/utils/lit/tests/Inputs/shtest-ulimit/ulimit_okay.txt b/llvm/utils/lit/tests/Inputs/shtest-ulimit/ulimit_okay.txt
index 4edf1c3..b1f2396b 100644
--- a/llvm/utils/lit/tests/Inputs/shtest-ulimit/ulimit_okay.txt
+++ b/llvm/utils/lit/tests/Inputs/shtest-ulimit/ulimit_okay.txt
@@ -1,4 +1,5 @@
# RUN: ulimit -n 50
+# RUN: ulimit -f 5
# RUN: %{python} %S/print_limits.py
# Fail the test so that we can assert on the output.
# RUN: not echo return
diff --git a/llvm/utils/lit/tests/shtest-readfile-external.py b/llvm/utils/lit/tests/shtest-readfile-external.py
index c00bff4..6fe1088 100644
--- a/llvm/utils/lit/tests/shtest-readfile-external.py
+++ b/llvm/utils/lit/tests/shtest-readfile-external.py
@@ -6,7 +6,7 @@
# UNSUPPORTED: system-windows
# RUN: env LIT_USE_INTERNAL_SHELL=0 not %{lit} -a -v %{inputs}/shtest-readfile | FileCheck -match-full-lines -DTEMP_PATH=%S/Inputs/shtest-readfile/Output %s
-# CHECK: -- Testing: 4 tests{{.*}}
+# CHECK: -- Testing: 5 tests{{.*}}
# CHECK-LABEL: FAIL: shtest-readfile :: absolute-paths.txt ({{[^)]*}})
# CHECK: echo $(cat [[TEMP_PATH]]/absolute-paths.txt.tmp) && test -e [[TEMP_PATH]]/absolute-paths.txt.tmp {{.*}}
diff --git a/llvm/utils/lit/tests/shtest-readfile.py b/llvm/utils/lit/tests/shtest-readfile.py
index 66e3a04..218da22 100644
--- a/llvm/utils/lit/tests/shtest-readfile.py
+++ b/llvm/utils/lit/tests/shtest-readfile.py
@@ -5,12 +5,16 @@
# RUN: env LIT_USE_INTERNAL_SHELL=1 not %{lit} -a -v %{inputs}/shtest-readfile | FileCheck -match-full-lines -DTEMP_PATH=%S%{fs-sep}Inputs%{fs-sep}shtest-readfile%{fs-sep}Output %s
-# CHECK: -- Testing: 4 tests{{.*}}
+# CHECK: -- Testing: 5 tests{{.*}}
# CHECK-LABEL: FAIL: shtest-readfile :: absolute-paths.txt ({{[^)]*}})
# CHECK: echo hello
# CHECK: # executed command: echo '%{readfile:[[TEMP_PATH]]{{[\\\/]}}absolute-paths.txt.tmp}'
+# CHECK-LABEL: FAIL: shtest-readfile :: env.txt ({{[^)]*}})
+# CHECK: env TEST=hello {{.*}} -c "import os; print(os.environ['TEST'])"
+# CHECK: # | hello
+
# CHECK-LABEL: FAIL: shtest-readfile :: file-does-not-exist.txt ({{[^)]*}})
# CHECK: # executed command: @echo 'echo %{readfile:/file/does/not/exist}'
# CHECK: # | File specified in readfile substitution does not exist: {{.*}}/file/does/not/exist
diff --git a/llvm/utils/lit/tests/shtest-ulimit-nondarwin.py b/llvm/utils/lit/tests/shtest-ulimit-nondarwin.py
index 2d96fea..893270e 100644
--- a/llvm/utils/lit/tests/shtest-ulimit-nondarwin.py
+++ b/llvm/utils/lit/tests/shtest-ulimit-nondarwin.py
@@ -6,8 +6,16 @@
# RUN: not %{lit} -a -v %{inputs}/shtest-ulimit-nondarwin | FileCheck %s
-# CHECK: -- Testing: 1 tests{{.*}}
+# CHECK: -- Testing: 2 tests{{.*}}
# CHECK-LABEL: FAIL: shtest-ulimit :: ulimit_okay.txt ({{[^)]*}})
# CHECK: ulimit -v 1048576
+# CHECK: ulimit -s 256
# CHECK: RLIMIT_AS=1073741824
+# CHECK: RLIMIT_STACK=262144
+
+# CHECK-LABEL: FAIL: shtest-ulimit :: ulimit_unlimited.txt ({{[^)]*}})
+# CHECK: ulimit -f 5
+# CHECK: RLIMIT_FSIZE=5
+# CHECK: ulimit -f unlimited
+# CHECK: RLIMIT_FSIZE=-1
diff --git a/llvm/utils/lit/tests/shtest-ulimit.py b/llvm/utils/lit/tests/shtest-ulimit.py
index 09cd475..21e5a5e 100644
--- a/llvm/utils/lit/tests/shtest-ulimit.py
+++ b/llvm/utils/lit/tests/shtest-ulimit.py
@@ -19,7 +19,9 @@
# CHECK-LABEL: FAIL: shtest-ulimit :: ulimit_okay.txt ({{[^)]*}})
# CHECK: ulimit -n 50
+# CHECK: ulimit -f 5
# CHECK: RLIMIT_NOFILE=50
+# CHECK: RLIMIT_FSIZE=5
# CHECK-LABEL: FAIL: shtest-ulimit :: ulimit_reset.txt ({{[^)]*}})
# CHECK: RLIMIT_NOFILE=[[BASE_NOFILE_LIMIT]]
diff --git a/llvm/utils/update_givaluetracking_test_checks.py b/llvm/utils/update_givaluetracking_test_checks.py
index 49b068a..9ad0f3e 100755
--- a/llvm/utils/update_givaluetracking_test_checks.py
+++ b/llvm/utils/update_givaluetracking_test_checks.py
@@ -19,6 +19,7 @@ import re
import sys
from UpdateTestChecks import common
+from UpdateTestChecks import mir
VT_FUNCTION_RE = re.compile(
r"\s*name:\s*@(?P<func>[A-Za-z0-9_-]+)"
@@ -92,7 +93,7 @@ def update_test(ti: common.TestInfo):
func_dict = builder.finish_and_get_func_dict()
prefix_set = set([prefix for p in run_list for prefix in p[0]])
common.debug("Rewriting FileCheck prefixes:", str(prefix_set))
- output_lines = common.add_mir_checks(
+ output_lines = mir.add_mir_checks(
ti.input_lines,
prefix_set,
ti.test_autogenerated_note,
diff --git a/llvm/utils/update_mir_test_checks.py b/llvm/utils/update_mir_test_checks.py
index c4ee052..ba70249 100755
--- a/llvm/utils/update_mir_test_checks.py
+++ b/llvm/utils/update_mir_test_checks.py
@@ -31,39 +31,7 @@ import subprocess
import sys
from UpdateTestChecks import common
-
-VREG_RE = re.compile(r"(%[0-9]+)(?:\.[a-z0-9_]+)?(?::[a-z0-9_]+)?(?:\([<>a-z0-9 ]+\))?")
-MI_FLAGS_STR = (
- r"(frame-setup |frame-destroy |nnan |ninf |nsz |arcp |contract |afn "
- r"|reassoc |nuw |nsw |exact |nofpexcept |nomerge |unpredictable "
- r"|noconvergent |nneg |disjoint |nusw |samesign |inbounds )*"
-)
-VREG_DEF_FLAGS_STR = r"(?:dead |undef )*"
-
-# Pattern to match the defined vregs and the opcode of an instruction that
-# defines vregs. Opcodes starting with a lower-case 't' are allowed to match
-# ARM's thumb instructions, like tADDi8 and t2ADDri.
-VREG_DEF_RE = re.compile(
- r"^ *(?P<vregs>{2}{0}(?:, {2}{0})*) = "
- r"{1}(?P<opcode>[A-Zt][A-Za-z0-9_]+)".format(
- VREG_RE.pattern, MI_FLAGS_STR, VREG_DEF_FLAGS_STR
- )
-)
-
-MIR_FUNC_RE = re.compile(
- r"^---$"
- r"\n"
- r"^ *name: *(?P<func>[A-Za-z0-9_.-]+)$"
- r".*?"
- r"(?:^ *fixedStack: *(\[\])? *\n"
- r"(?P<fixedStack>.*?)\n?"
- r"^ *stack:"
- r".*?)?"
- r"^ *body: *\|\n"
- r"(?P<body>.*?)\n"
- r"^\.\.\.$",
- flags=(re.M | re.S),
-)
+from UpdateTestChecks import mir
class LLC:
@@ -143,89 +111,6 @@ def build_run_list(test, run_lines, verbose=False):
return run_list
-def build_function_info_dictionary(
- test, raw_tool_output, triple, prefixes, func_dict, verbose
-):
- for m in MIR_FUNC_RE.finditer(raw_tool_output):
- func = m.group("func")
- fixedStack = m.group("fixedStack")
- body = m.group("body")
- if verbose:
- log("Processing function: {}".format(func))
- for l in body.splitlines():
- log(" {}".format(l))
-
- # Vreg mangling
- mangled = []
- vreg_map = {}
- for func_line in body.splitlines(keepends=True):
- m = VREG_DEF_RE.match(func_line)
- if m:
- for vreg in VREG_RE.finditer(m.group("vregs")):
- if vreg.group(1) in vreg_map:
- name = vreg_map[vreg.group(1)]
- else:
- name = mangle_vreg(m.group("opcode"), vreg_map.values())
- vreg_map[vreg.group(1)] = name
- func_line = func_line.replace(
- vreg.group(1), "[[{}:%[0-9]+]]".format(name), 1
- )
- for number, name in vreg_map.items():
- func_line = re.sub(
- r"{}\b".format(number), "[[{}]]".format(name), func_line
- )
- mangled.append(func_line)
- body = "".join(mangled)
-
- for prefix in prefixes:
- info = common.function_body(
- body, fixedStack, None, None, None, None, ginfo=None
- )
- if func in func_dict[prefix]:
- if (
- not func_dict[prefix][func]
- or func_dict[prefix][func].scrub != info.scrub
- or func_dict[prefix][func].extrascrub != info.extrascrub
- ):
- func_dict[prefix][func] = None
- else:
- func_dict[prefix][func] = info
-
-
-def mangle_vreg(opcode, current_names):
- base = opcode
- # Simplify some common prefixes and suffixes
- if opcode.startswith("G_"):
- base = base[len("G_") :]
- if opcode.endswith("_PSEUDO"):
- base = base[: len("_PSEUDO")]
- # Shorten some common opcodes with long-ish names
- base = dict(
- IMPLICIT_DEF="DEF",
- GLOBAL_VALUE="GV",
- CONSTANT="C",
- FCONSTANT="C",
- MERGE_VALUES="MV",
- UNMERGE_VALUES="UV",
- INTRINSIC="INT",
- INTRINSIC_W_SIDE_EFFECTS="INT",
- INSERT_VECTOR_ELT="IVEC",
- EXTRACT_VECTOR_ELT="EVEC",
- SHUFFLE_VECTOR="SHUF",
- ).get(base, base)
- # Avoid ambiguity when opcodes end in numbers
- if len(base.rstrip("0123456789")) < len(base):
- base += "_"
-
- i = 0
- for name in current_names:
- if name.rstrip("0123456789") == base:
- i += 1
- if i:
- return "{}{}".format(base, i)
- return base
-
-
def update_test_file(args, test, autogenerated_note):
with open(test) as fd:
input_lines = [l.rstrip() for l in fd]
@@ -247,7 +132,7 @@ def update_test_file(args, test, autogenerated_note):
common.warn("No triple found: skipping file", test_file=test)
return
- build_function_info_dictionary(
+ mir.build_function_info_dictionary(
test,
raw_tool_output,
triple_in_cmd or triple_in_ir,
@@ -259,7 +144,7 @@ def update_test_file(args, test, autogenerated_note):
prefix_set = set([prefix for run in run_list for prefix in run[0]])
log("Rewriting FileCheck prefixes: {}".format(prefix_set), args.verbose)
- output_lines = common.add_mir_checks(
+ output_lines = mir.add_mir_checks(
input_lines,
prefix_set,
autogenerated_note,
diff --git a/mlir/include/mlir/Dialect/ControlFlow/Transforms/StructuralTypeConversions.h b/mlir/include/mlir/Dialect/ControlFlow/Transforms/StructuralTypeConversions.h
new file mode 100644
index 0000000..a32d9e2
--- /dev/null
+++ b/mlir/include/mlir/Dialect/ControlFlow/Transforms/StructuralTypeConversions.h
@@ -0,0 +1,48 @@
+//===- StructuralTypeConversions.h - CF Type Conversions --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_CONTROL_FLOW_TRANSFORMS_STRUCTURAL_TYPE_CONVERSIONS_H
+#define MLIR_DIALECT_CONTROL_FLOW_TRANSFORMS_STRUCTURAL_TYPE_CONVERSIONS_H
+
+#include "mlir/IR/PatternMatch.h"
+
+namespace mlir {
+
+class ConversionTarget;
+class TypeConverter;
+
+namespace cf {
+
+/// Populates patterns for CF structural type conversions and sets up the
+/// provided ConversionTarget with the appropriate legality configuration for
+/// the ops to get converted properly.
+///
+/// A "structural" type conversion is one where the underlying ops are
+/// completely agnostic to the actual types involved and simply need to update
+/// their types. An example of this is cf.br -- the cf.br op needs to update
+/// its types according to the TypeConverter, but otherwise does not care
+/// what type conversions are happening.
+void populateCFStructuralTypeConversionsAndLegality(
+ const TypeConverter &typeConverter, RewritePatternSet &patterns,
+ ConversionTarget &target, PatternBenefit benefit = 1);
+
+/// Similar to `populateCFStructuralTypeConversionsAndLegality` but does not
+/// populate the conversion target.
+void populateCFStructuralTypeConversions(const TypeConverter &typeConverter,
+ RewritePatternSet &patterns,
+ PatternBenefit benefit = 1);
+
+/// Updates the ConversionTarget with dynamic legality of CF operations based
+/// on the provided type converter.
+void populateCFStructuralTypeConversionTarget(
+ const TypeConverter &typeConverter, ConversionTarget &target);
+
+} // namespace cf
+} // namespace mlir
+
+#endif // MLIR_DIALECT_CONTROL_FLOW_TRANSFORMS_STRUCTURAL_TYPE_CONVERSIONS_H
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index f3674c3..ecd036d 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -293,10 +293,6 @@ def MapOp : LinalgStructuredBase_Op<"map", [
// Implement functions necessary for DestinationStyleOpInterface.
MutableOperandRange getDpsInitsMutable() { return getInitMutable(); }
- SmallVector<OpOperand *> getOpOperandsMatchingBBargs() {
- return getDpsInputOperands();
- }
-
bool payloadUsesValueFromOperand(OpOperand * opOperand) {
if (isDpsInit(opOperand)) return false;
return !getMatchingBlockArgument(opOperand).use_empty();
diff --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
index 7d0a236..76a822b 100644
--- a/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
+++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
@@ -14,6 +14,7 @@
#include "mlir/Conversion/SCFToGPU/SCFToGPU.h"
+#include "mlir/Analysis/AliasAnalysis/LocalAliasAnalysis.h"
#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
@@ -27,6 +28,7 @@
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/RegionUtils.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/DebugLog.h"
#include <optional>
@@ -625,18 +627,49 @@ ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
bool seenSideeffects = false;
// Whether we have left a nesting scope (and hence are no longer innermost).
bool leftNestingScope = false;
+ LocalAliasAnalysis aliasAnalysis;
+ llvm::DenseSet<Value> writtenBuffer;
while (!worklist.empty()) {
Operation *op = worklist.pop_back_val();
// Now walk over the body and clone it.
// TODO: This is only correct if there either is no further scf.parallel
- // nested or this code is side-effect free. Otherwise we might need
- // predication. We are overly conservative for now and only allow
- // side-effects in the innermost scope.
+    // nested, or this code has side effects but the buffers it writes do
+    // not alias any buffer accessed by the inner loop. Otherwise we might
+    // need predication.
if (auto nestedParallel = dyn_cast<ParallelOp>(op)) {
// Before entering a nested scope, make sure there have been no
- // sideeffects until now.
- if (seenSideeffects)
- return failure();
+ // sideeffects until now or the nested operations do not access the
+ // buffer written by outer scope.
+ if (seenSideeffects) {
+ WalkResult walkRes = nestedParallel.walk([&](Operation *nestedOp) {
+ if (isMemoryEffectFree(nestedOp))
+ return WalkResult::advance();
+
+ auto memEffectInterface = dyn_cast<MemoryEffectOpInterface>(nestedOp);
+ if (!memEffectInterface)
+ return WalkResult::advance();
+
+ SmallVector<MemoryEffects::EffectInstance> effects;
+ memEffectInterface.getEffects(effects);
+ for (const MemoryEffects::EffectInstance &effect : effects) {
+ if (isa<MemoryEffects::Read>(effect.getEffect()) ||
+ isa<MemoryEffects::Write>(effect.getEffect())) {
+ Value baseBuffer = effect.getValue();
+ if (!baseBuffer)
+ return WalkResult::interrupt();
+ for (Value val : writtenBuffer) {
+ if (aliasAnalysis.alias(baseBuffer, val) !=
+ AliasResult::NoAlias) {
+ return WalkResult::interrupt();
+ }
+ }
+ }
+ }
+ return WalkResult::advance();
+ });
+ if (walkRes.wasInterrupted())
+ return failure();
+ }
// A nested scf.parallel needs insertion of code to compute indices.
// Insert that now. This will also update the worklist with the loops
// body.
@@ -650,6 +683,7 @@ ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
rewriter.setInsertionPointAfter(parent);
leftNestingScope = true;
seenSideeffects = false;
+ writtenBuffer.clear();
} else if (auto reduceOp = dyn_cast<scf::ReduceOp>(op)) {
// Convert scf.reduction op
auto parentLoop = op->getParentOfType<ParallelOp>();
@@ -682,6 +716,24 @@ ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
Operation *clone = rewriter.clone(*op, cloningMap);
cloningMap.map(op->getResults(), clone->getResults());
// Check for side effects.
+ if (!isMemoryEffectFree(clone)) {
+ // Record the buffer accessed by the operations with write effects.
+ if (auto memEffectInterface =
+ dyn_cast<MemoryEffectOpInterface>(clone)) {
+ SmallVector<MemoryEffects::EffectInstance> effects;
+ memEffectInterface.getEffects(effects);
+ for (const MemoryEffects::EffectInstance &effect : effects) {
+ if (isa<MemoryEffects::Write>(effect.getEffect())) {
+ Value writtenBase = effect.getValue();
+ // Conservatively return failure if we cannot find the written
+ // address.
+ if (!writtenBase)
+ return failure();
+ writtenBuffer.insert(writtenBase);
+ }
+ }
+ }
+ }
// TODO: Handle region side effects properly.
seenSideeffects |=
!isMemoryEffectFree(clone) || clone->getNumRegions() != 0;
diff --git a/mlir/lib/Dialect/ControlFlow/Transforms/CMakeLists.txt b/mlir/lib/Dialect/ControlFlow/Transforms/CMakeLists.txt
index 47740d3..e9da135 100644
--- a/mlir/lib/Dialect/ControlFlow/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/ControlFlow/Transforms/CMakeLists.txt
@@ -1,6 +1,7 @@
add_mlir_dialect_library(MLIRControlFlowTransforms
BufferDeallocationOpInterfaceImpl.cpp
BufferizableOpInterfaceImpl.cpp
+ StructuralTypeConversions.cpp
ADDITIONAL_HEADER_DIRS
${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/ControlFlow/Transforms
diff --git a/mlir/lib/Dialect/ControlFlow/Transforms/StructuralTypeConversions.cpp b/mlir/lib/Dialect/ControlFlow/Transforms/StructuralTypeConversions.cpp
new file mode 100644
index 0000000..5e2a742
--- /dev/null
+++ b/mlir/lib/Dialect/ControlFlow/Transforms/StructuralTypeConversions.cpp
@@ -0,0 +1,169 @@
+//===- StructuralTypeConversions.cpp - CF Structural Type Conversions ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements structural type conversions for unstructured control
+// flow ops such as cf.br, cf.cond_br, and cf.switch.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/ControlFlow/Transforms/StructuralTypeConversions.h"
+
+#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/DialectConversion.h"
+
+using namespace mlir;
+
+namespace {
+
+/// Helper function for converting branch ops. This function converts the
+/// signature of the given block. If the new block signature is different from
+/// `expectedTypes`, returns "failure".
+static FailureOr<Block *> getConvertedBlock(ConversionPatternRewriter &rewriter,
+ const TypeConverter *converter,
+ Operation *branchOp, Block *block,
+ TypeRange expectedTypes) {
+ assert(converter && "expected non-null type converter");
+ assert(!block->isEntryBlock() && "entry blocks have no predecessors");
+
+ // There is nothing to do if the types already match.
+ if (block->getArgumentTypes() == expectedTypes)
+ return block;
+
+ // Compute the new block argument types and convert the block.
+ std::optional<TypeConverter::SignatureConversion> conversion =
+ converter->convertBlockSignature(block);
+ if (!conversion)
+ return rewriter.notifyMatchFailure(branchOp,
+ "could not compute block signature");
+ if (expectedTypes != conversion->getConvertedTypes())
+ return rewriter.notifyMatchFailure(
+ branchOp,
+ "mismatch between adaptor operand types and computed block signature");
+ return rewriter.applySignatureConversion(block, *conversion, converter);
+}
+
+/// Flatten the given value ranges into a single vector of values.
+static SmallVector<Value> flattenValues(ArrayRef<ValueRange> values) {
+ SmallVector<Value> result;
+ for (const ValueRange &vals : values)
+ llvm::append_range(result, vals);
+ return result;
+}
+
+/// Convert the destination block signature (if necessary) and change the
+/// operands of the branch op.
+struct BranchOpConversion : public OpConversionPattern<cf::BranchOp> {
+ using OpConversionPattern<cf::BranchOp>::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(cf::BranchOp op, OneToNOpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ SmallVector<Value> flattenedAdaptor = flattenValues(adaptor.getOperands());
+ FailureOr<Block *> convertedBlock =
+ getConvertedBlock(rewriter, getTypeConverter(), op, op.getSuccessor(),
+ TypeRange(ValueRange(flattenedAdaptor)));
+ if (failed(convertedBlock))
+ return failure();
+ rewriter.replaceOpWithNewOp<cf::BranchOp>(op, flattenedAdaptor,
+ *convertedBlock);
+ return success();
+ }
+};
+
+/// Convert the destination block signatures (if necessary) and change the
+/// operands of the branch op.
+struct CondBranchOpConversion : public OpConversionPattern<cf::CondBranchOp> {
+ using OpConversionPattern<cf::CondBranchOp>::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(cf::CondBranchOp op, OneToNOpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ SmallVector<Value> flattenedAdaptorTrue =
+ flattenValues(adaptor.getTrueDestOperands());
+ SmallVector<Value> flattenedAdaptorFalse =
+ flattenValues(adaptor.getFalseDestOperands());
+ if (!llvm::hasSingleElement(adaptor.getCondition()))
+ return rewriter.notifyMatchFailure(op,
+ "expected single element condition");
+ FailureOr<Block *> convertedTrueBlock =
+ getConvertedBlock(rewriter, getTypeConverter(), op, op.getTrueDest(),
+ TypeRange(ValueRange(flattenedAdaptorTrue)));
+ if (failed(convertedTrueBlock))
+ return failure();
+ FailureOr<Block *> convertedFalseBlock =
+ getConvertedBlock(rewriter, getTypeConverter(), op, op.getFalseDest(),
+ TypeRange(ValueRange(flattenedAdaptorFalse)));
+ if (failed(convertedFalseBlock))
+ return failure();
+ rewriter.replaceOpWithNewOp<cf::CondBranchOp>(
+ op, llvm::getSingleElement(adaptor.getCondition()),
+ flattenedAdaptorTrue, flattenedAdaptorFalse, op.getBranchWeightsAttr(),
+ *convertedTrueBlock, *convertedFalseBlock);
+ return success();
+ }
+};
+
+/// Convert the destination block signatures (if necessary) and change the
+/// operands of the switch op.
+struct SwitchOpConversion : public OpConversionPattern<cf::SwitchOp> {
+ using OpConversionPattern<cf::SwitchOp>::OpConversionPattern;
+
+ LogicalResult
+ matchAndRewrite(cf::SwitchOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ // Get or convert default block.
+ FailureOr<Block *> convertedDefaultBlock = getConvertedBlock(
+ rewriter, getTypeConverter(), op, op.getDefaultDestination(),
+ TypeRange(adaptor.getDefaultOperands()));
+ if (failed(convertedDefaultBlock))
+ return failure();
+
+ // Get or convert all case blocks.
+ SmallVector<Block *> caseDestinations;
+ SmallVector<ValueRange> caseOperands = adaptor.getCaseOperands();
+ for (auto it : llvm::enumerate(op.getCaseDestinations())) {
+ Block *b = it.value();
+ FailureOr<Block *> convertedBlock =
+ getConvertedBlock(rewriter, getTypeConverter(), op, b,
+ TypeRange(caseOperands[it.index()]));
+ if (failed(convertedBlock))
+ return failure();
+ caseDestinations.push_back(*convertedBlock);
+ }
+
+ rewriter.replaceOpWithNewOp<cf::SwitchOp>(
+ op, adaptor.getFlag(), *convertedDefaultBlock,
+ adaptor.getDefaultOperands(), adaptor.getCaseValuesAttr(),
+ caseDestinations, caseOperands);
+ return success();
+ }
+};
+
+} // namespace
+
+void mlir::cf::populateCFStructuralTypeConversions(
+ const TypeConverter &typeConverter, RewritePatternSet &patterns,
+ PatternBenefit benefit) {
+ patterns.add<BranchOpConversion, CondBranchOpConversion, SwitchOpConversion>(
+ typeConverter, patterns.getContext(), benefit);
+}
+
+void mlir::cf::populateCFStructuralTypeConversionTarget(
+ const TypeConverter &typeConverter, ConversionTarget &target) {
+ target.addDynamicallyLegalOp<cf::BranchOp, cf::CondBranchOp, cf::SwitchOp>(
+ [&](Operation *op) { return typeConverter.isLegal(op->getOperands()); });
+}
+
+void mlir::cf::populateCFStructuralTypeConversionsAndLegality(
+ const TypeConverter &typeConverter, RewritePatternSet &patterns,
+ ConversionTarget &target, PatternBenefit benefit) {
+ populateCFStructuralTypeConversions(typeConverter, patterns, benefit);
+ populateCFStructuralTypeConversionTarget(typeConverter, target);
+}
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index cbc565b..3dc45ed 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1474,6 +1474,8 @@ void MapOp::getAsmBlockArgumentNames(Region &region,
OpAsmSetValueNameFn setNameFn) {
for (Value v : getRegionInputArgs())
setNameFn(v, "in");
+ for (Value v : getRegionOutputArgs())
+ setNameFn(v, "init");
}
void MapOp::getAsmResultNames(function_ref<void(Value, StringRef)> setNameFn) {
@@ -1495,14 +1497,14 @@ void MapOp::build(
if (bodyBuild)
buildGenericRegion(builder, result.location, *result.regions.front(),
- inputs, /*outputs=*/{}, bodyBuild);
+ inputs, /*outputs=*/{init}, bodyBuild);
}
static void addBodyWithPayloadOp(OpAsmParser &parser, OperationState &result,
const OperationName &payloadOpName,
const NamedAttrList &payloadOpAttrs,
ArrayRef<Value> operands,
- bool initFirst = false) {
+ bool initFirst = false, bool mapInit = true) {
OpBuilder b(parser.getContext());
Region *body = result.addRegion();
Block &block = body->emplaceBlock();
@@ -1516,12 +1518,13 @@ static void addBodyWithPayloadOp(OpAsmParser &parser, OperationState &result,
// If initFirst flag is enabled, we consider init as the first position of
// payload operands.
if (initFirst) {
- payloadOpOperands.push_back(block.getArguments().back());
+ if (mapInit)
+ payloadOpOperands.push_back(block.getArguments().back());
for (const auto &arg : block.getArguments().drop_back())
payloadOpOperands.push_back(arg);
} else {
payloadOpOperands = {block.getArguments().begin(),
- block.getArguments().end()};
+ block.getArguments().end() - int(!mapInit)};
}
Operation *payloadOp = b.create(
@@ -1553,8 +1556,8 @@ ParseResult MapOp::parse(OpAsmParser &parser, OperationState &result) {
if (payloadOpName.has_value()) {
if (!result.operands.empty())
addBodyWithPayloadOp(parser, result, payloadOpName.value(),
- payloadOpAttrs,
- ArrayRef(result.operands).drop_back());
+ payloadOpAttrs, ArrayRef(result.operands), false,
+ false);
else
result.addRegion();
} else {
@@ -1570,7 +1573,11 @@ ParseResult MapOp::parse(OpAsmParser &parser, OperationState &result) {
return success();
}
-static bool canUseShortForm(Block *body, bool initFirst = false) {
+static bool canUseShortForm(Block *body, bool initFirst = false,
+ bool mapInit = true) {
+  // `initFirst == true` implies that we want to map the init arg.
+ if (initFirst && !mapInit)
+ return false;
// Check if the body can be printed in short form. The following 4 conditions
// must be satisfied:
@@ -1582,7 +1589,7 @@ static bool canUseShortForm(Block *body, bool initFirst = false) {
// 2) The payload op must have the same number of operands as the number of
// block arguments.
if (payload.getNumOperands() == 0 ||
- payload.getNumOperands() != body->getNumArguments())
+ payload.getNumOperands() != body->getNumArguments() - int(!mapInit))
return false;
// 3) If `initFirst` is true (e.g., for reduction ops), the init block
@@ -1600,7 +1607,8 @@ static bool canUseShortForm(Block *body, bool initFirst = false) {
}
} else {
for (const auto &[operand, bbArg] :
- llvm::zip(payload.getOperands(), body->getArguments())) {
+ llvm::zip(payload.getOperands(),
+ body->getArguments().drop_back(int(!mapInit)))) {
if (bbArg != operand)
return false;
}
@@ -1632,7 +1640,8 @@ static void printShortForm(OpAsmPrinter &p, Operation *payloadOp) {
void MapOp::print(OpAsmPrinter &p) {
Block *mapper = getBody();
- bool useShortForm = canUseShortForm(mapper);
+ bool useShortForm =
+      canUseShortForm(mapper, /*initFirst=*/false, /*mapInit=*/false);
if (useShortForm) {
printShortForm(p, &mapper->getOperations().front());
}
@@ -1658,11 +1667,13 @@ LogicalResult MapOp::verify() {
auto *bodyBlock = getBody();
auto blockArgs = bodyBlock->getArguments();
- // Checks if the number of `inputs` match the arity of the `mapper` region.
- if (getInputs().size() != blockArgs.size())
+ // Checks if the number of `inputs` + `init` match the arity of the `mapper`
+ // region.
+ if (getInputs().size() + 1 != blockArgs.size())
return emitOpError() << "expects number of operands to match the arity of "
"mapper, but got: "
- << getInputs().size() << " and " << blockArgs.size();
+ << getInputs().size() + 1 << " and "
+ << blockArgs.size();
// The parameters of mapper should all match the element type of inputs.
for (const auto &[bbArgType, inputArg] :
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
index 3e31393..75bb175 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
@@ -31,10 +31,8 @@ using namespace mlir;
using namespace mlir::linalg;
static LogicalResult generalizeNamedOpPrecondition(LinalgOp linalgOp) {
- // Bailout if `linalgOp` is already a generic or a linalg.map. We cannot
- // trivially generalize a `linalg.map`, as it does not use the output as
- // region arguments in the block.
- if (isa<GenericOp>(linalgOp) || isa<MapOp>(linalgOp))
+ // Bailout if `linalgOp` is already a generic.
+ if (isa<GenericOp>(linalgOp))
return failure();
// Check if the operation has exactly one region.
if (linalgOp->getNumRegions() != 1) {
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 9d62491..0f317ea 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -3911,21 +3911,21 @@ struct Conv1DGenerator
Value lhs = vector::TransferReadOp::create(
rewriter, loc, lhsType, lhsShaped, ValueRange{zero, zero, zero},
/*padding=*/arith::getZeroConstant(rewriter, loc, lhsEltType));
- auto maybeMaskedLhs = maybeMaskXferOp(
+ auto *maybeMaskedLhs = maybeMaskXferOp(
lhsType.getShape(), lhsType.getScalableDims(), lhs.getDefiningOp());
// Read rhs slice of size {kw, c} @ [0, 0].
Value rhs = vector::TransferReadOp::create(
rewriter, loc, rhsType, rhsShaped, ValueRange{zero, zero},
/*padding=*/arith::getZeroConstant(rewriter, loc, rhsEltType));
- auto maybeMaskedRhs = maybeMaskXferOp(
+ auto *maybeMaskedRhs = maybeMaskXferOp(
rhsType.getShape(), rhsType.getScalableDims(), rhs.getDefiningOp());
// Read res slice of size {n, w, c} @ [0, 0, 0].
Value res = vector::TransferReadOp::create(
rewriter, loc, resType, resShaped, ValueRange{zero, zero, zero},
/*padding=*/arith::getZeroConstant(rewriter, loc, resEltType));
- auto maybeMaskedRes = maybeMaskXferOp(
+ auto *maybeMaskedRes = maybeMaskXferOp(
resType.getShape(), resType.getScalableDims(), res.getDefiningOp());
//===------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index ac72002..110bfdc 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -41,10 +41,6 @@
using namespace mlir;
using namespace mlir::tensor;
-using llvm::divideCeilSigned;
-using llvm::divideFloorSigned;
-using llvm::mod;
-
/// Materialize a single constant operation from a given attribute value with
/// the desired resultant type.
Operation *TensorDialect::materializeConstant(OpBuilder &builder,
diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
index bce964e..c607ece 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -579,6 +579,7 @@ static Value lowerGenerateLikeOpBody(RewriterBase &rewriter, Location loc,
linalg::MapOp::create(rewriter, loc, tensorType, /*inputs=*/ValueRange(),
/*init=*/tensorDestination);
Block &linalgBody = linalgOp.getMapper().emplaceBlock();
+ linalgBody.addArgument(tensorType.getElementType(), loc);
// Create linalg::IndexOps.
rewriter.setInsertionPointToStart(&linalgBody);
@@ -1068,6 +1069,7 @@ struct SplatOpInterface
/*inputs=*/ValueRange(),
/*init=*/*tensorAlloc);
Block &linalgBody = linalgOp.getMapper().emplaceBlock();
+ linalgBody.addArgument(tensorType.getElementType(), loc);
// Create linalg::IndexOps.
rewriter.setInsertionPointToStart(&linalgBody);
diff --git a/mlir/lib/Query/Query.cpp b/mlir/lib/Query/Query.cpp
index 375e820..cf8a4d2 100644
--- a/mlir/lib/Query/Query.cpp
+++ b/mlir/lib/Query/Query.cpp
@@ -121,12 +121,13 @@ LogicalResult MatchQuery::run(llvm::raw_ostream &os, QuerySession &qs) const {
Operation *rootOp = qs.getRootOp();
int matchCount = 0;
matcher::MatchFinder finder;
+
+ StringRef functionName = matcher.getFunctionName();
auto matches = finder.collectMatches(rootOp, std::move(matcher));
// An extract call is recognized by considering if the matcher has a name.
// TODO: Consider making the extract more explicit.
- if (matcher.hasFunctionName()) {
- auto functionName = matcher.getFunctionName();
+ if (!functionName.empty()) {
std::vector<Operation *> flattenedMatches =
finder.flattenMatchedOps(matches);
Operation *function =
diff --git a/mlir/lib/Support/Timing.cpp b/mlir/lib/Support/Timing.cpp
index fb6f82c..16306d7 100644
--- a/mlir/lib/Support/Timing.cpp
+++ b/mlir/lib/Support/Timing.cpp
@@ -319,7 +319,6 @@ public:
void mergeChildren(AsyncChildrenMap &&other) {
for (auto &thread : other) {
mergeChildren(std::move(thread.second));
- assert(thread.second.empty());
}
other.clear();
}
diff --git a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
index 1dbce05..26f5a3e 100644
--- a/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
+++ b/mlir/test/Conversion/SCFToGPU/parallel_loop.mlir
@@ -641,3 +641,35 @@ func.func @parallel_reduction_1d_outside() {
// CHECK: scf.parallel
// CHECK-NEXT: scf.parallel
// CHECK: scf.reduce
+
+// -----
+
+// CHECK-LABEL: @nested_parallel_with_side_effect
+func.func @nested_parallel_with_side_effect() {
+ %c65536 = arith.constant 65536 : index
+ %c2 = arith.constant 2 : index
+ %c256 = arith.constant 256 : index
+ %c0 = arith.constant 0 : index
+ %c4 = arith.constant 4 : index
+ %c1 = arith.constant 1 : index
+ %alloc_0 = memref.alloc() : memref<2x256x256xf32>
+ %alloc_1 = memref.alloc() : memref<2x4x256x256xf32>
+ %alloc_2 = memref.alloc() : memref<4x4xf32>
+ %alloc_3 = memref.alloc() : memref<4x4xf32>
+ scf.parallel (%arg2, %arg3, %arg4) = (%c0, %c0, %c0) to (%c2, %c4, %c65536) step (%c1, %c1, %c1) {
+ %1 = arith.remsi %arg4, %c256 : index
+ %2 = arith.divsi %arg4, %c256 : index
+ %4 = memref.load %alloc_0[%arg2, %2, %1] : memref<2x256x256xf32>
+ memref.store %4, %alloc_1[%arg2, %arg3, %2, %1] : memref<2x4x256x256xf32>
+ scf.parallel (%arg5) = (%c0) to (%c4) step (%c1) {
+ %5 = memref.load %alloc_2[%arg5, %c0] : memref<4x4xf32>
+ memref.store %5, %alloc_3[%arg5, %c0] : memref<4x4xf32>
+ scf.reduce
+ } {mapping = [#gpu.loop_dim_map<processor = thread_x, map = (d0) -> (d0), bound = (d0) -> (d0)>]}
+ scf.reduce
+ } {mapping = [#gpu.loop_dim_map<processor = block_z, map = (d0) -> (d0), bound = (d0) -> (d0)>, #gpu.loop_dim_map<processor = block_y, map = (d0) -> (d0), bound = (d0) -> (d0)>, #gpu.loop_dim_map<processor = block_x, map = (d0) -> (d0), bound = (d0) -> (d0)>]}
+ return
+}
+
+// CHECK: gpu.launch
+// CHECK-NOT: scf.parallel
diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
index 26d2d98..f4020ede 100644
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -1423,7 +1423,7 @@ func.func @transpose_buffer(%input: memref<?xf32>,
func.func @recursive_effect(%arg : tensor<1xf32>) {
%init = arith.constant dense<0.0> : tensor<1xf32>
%mapped = linalg.map ins(%arg:tensor<1xf32>) outs(%init :tensor<1xf32>)
- (%in : f32) {
+ (%in : f32, %out: f32) {
vector.print %in : f32
linalg.yield %in : f32
}
diff --git a/mlir/test/Dialect/Linalg/generalize-named-ops.mlir b/mlir/test/Dialect/Linalg/generalize-named-ops.mlir
index ae07b1b..dcdd6c8 100644
--- a/mlir/test/Dialect/Linalg/generalize-named-ops.mlir
+++ b/mlir/test/Dialect/Linalg/generalize-named-ops.mlir
@@ -386,18 +386,24 @@ func.func @generalize_batch_reduce_gemm_bf16(%lhs: memref<7x8x9xbf16>, %rhs: mem
// -----
-// CHECK-LABEL: generalize_linalg_map
-func.func @generalize_linalg_map(%arg0: memref<1x8x8x8xf32>) {
+func.func @generalize_linalg_map(%arg0: memref<1x8x8x8xf32>, %arg1: memref<1x8x8x8xf32>, %arg2: memref<1x8x8x8xf32>) {
%cst = arith.constant 0.000000e+00 : f32
- // CHECK: linalg.map
- // CHECK-NOT: linalg.generic
- linalg.map outs(%arg0 : memref<1x8x8x8xf32>)
- () {
- linalg.yield %cst : f32
- }
+ linalg.map {arith.addf} ins(%arg0, %arg1: memref<1x8x8x8xf32>, memref<1x8x8x8xf32>) outs(%arg2 : memref<1x8x8x8xf32>)
return
}
+// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
+
+// CHECK: @generalize_linalg_map
+
+// CHECK: linalg.generic
+// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP0]], #[[MAP0]]]
+// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
+// CHECK-SAME: ins(%{{.+}}, %{{.+}} : memref<1x8x8x8xf32>, memref<1x8x8x8xf32>) outs(%{{.+}} : memref<1x8x8x8xf32>
+// CHECK: ^{{.+}}(%[[BBARG0:.+]]: f32, %[[BBARG1:.+]]: f32, %[[BBARG2:.+]]: f32)
+// CHECK: %[[ADD:.+]] = arith.addf %[[BBARG0]], %[[BBARG1]] : f32
+// CHECK: linalg.yield %[[ADD]] : f32
+
// -----
func.func @generalize_add(%lhs: memref<7x14x21xf32>, %rhs: memref<7x14x21xf32>,
diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir
index 40bf4d1..fabc8e6 100644
--- a/mlir/test/Dialect/Linalg/invalid.mlir
+++ b/mlir/test/Dialect/Linalg/invalid.mlir
@@ -681,7 +681,7 @@ func.func @map_binary_wrong_yield_operands(
%add = linalg.map
ins(%lhs, %rhs : tensor<64xf32>, tensor<64xf32>)
outs(%init:tensor<64xf32>)
- (%lhs_elem: f32, %rhs_elem: f32) {
+ (%lhs_elem: f32, %rhs_elem: f32, %out: f32) {
%0 = arith.addf %lhs_elem, %rhs_elem: f32
// expected-error @+1{{'linalg.yield' op expected number of yield values (2) to match the number of inits / outs operands of the enclosing LinalgOp (1)}}
linalg.yield %0, %0: f32, f32
@@ -694,11 +694,11 @@ func.func @map_binary_wrong_yield_operands(
func.func @map_input_mapper_arity_mismatch(
%lhs: tensor<64xf32>, %rhs: tensor<64xf32>, %init: tensor<64xf32>)
-> tensor<64xf32> {
- // expected-error@+1{{'linalg.map' op expects number of operands to match the arity of mapper, but got: 2 and 3}}
+ // expected-error@+1{{'linalg.map' op expects number of operands to match the arity of mapper, but got: 3 and 4}}
%add = linalg.map
ins(%lhs, %rhs : tensor<64xf32>, tensor<64xf32>)
outs(%init:tensor<64xf32>)
- (%lhs_elem: f32, %rhs_elem: f32, %extra_elem: f32) {
+ (%lhs_elem: f32, %rhs_elem: f32, %out: f32, %extra_elem: f32) {
%0 = arith.addf %lhs_elem, %rhs_elem: f32
linalg.yield %0: f32
}
@@ -714,7 +714,7 @@ func.func @map_input_mapper_type_mismatch(
%add = linalg.map
ins(%lhs, %rhs : tensor<64xf32>, tensor<64xf32>)
outs(%init:tensor<64xf32>)
- (%lhs_elem: f64, %rhs_elem: f64) {
+ (%lhs_elem: f64, %rhs_elem: f64, %out: f32) {
%0 = arith.addf %lhs_elem, %rhs_elem: f64
linalg.yield %0: f64
}
@@ -730,7 +730,7 @@ func.func @map_input_output_shape_mismatch(
%add = linalg.map
ins(%lhs, %rhs : tensor<64x64xf32>, tensor<64x64xf32>)
outs(%init:tensor<32xf32>)
- (%lhs_elem: f32, %rhs_elem: f32) {
+ (%lhs_elem: f32, %rhs_elem: f32, %out: f32) {
%0 = arith.addf %lhs_elem, %rhs_elem: f32
linalg.yield %0: f32
}
diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
index 1df15e8..85cc1ff 100644
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
@@ -339,7 +339,7 @@ func.func @map_binary(%lhs: tensor<64xf32>, %rhs: tensor<64xf32>,
%add = linalg.map
ins(%lhs, %rhs: tensor<64xf32>, tensor<64xf32>)
outs(%init:tensor<64xf32>)
- (%lhs_elem: f32, %rhs_elem: f32) {
+ (%lhs_elem: f32, %rhs_elem: f32, %out: f32) {
%0 = arith.addf %lhs_elem, %rhs_elem: f32
linalg.yield %0: f32
}
diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir
index 563013d..7492892 100644
--- a/mlir/test/Dialect/Linalg/roundtrip.mlir
+++ b/mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -341,7 +341,7 @@ func.func @mixed_parallel_reduced_results(%arg0 : tensor<?x?x?xf32>,
func.func @map_no_inputs(%init: tensor<64xf32>) -> tensor<64xf32> {
%add = linalg.map
outs(%init:tensor<64xf32>)
- () {
+ (%out: f32) {
%0 = arith.constant 0.0: f32
linalg.yield %0: f32
}
@@ -349,7 +349,7 @@ func.func @map_no_inputs(%init: tensor<64xf32>) -> tensor<64xf32> {
}
// CHECK-LABEL: func @map_no_inputs
// CHECK: linalg.map outs
-// CHECK-NEXT: () {
+// CHECK-NEXT: (%[[OUT:.*]]: f32) {
// CHECK-NEXT: arith.constant
// CHECK-NEXT: linalg.yield
// CHECK-NEXT: }
@@ -361,7 +361,7 @@ func.func @map_binary(%lhs: tensor<64xf32>, %rhs: tensor<64xf32>,
%add = linalg.map
ins(%lhs, %rhs: tensor<64xf32>, tensor<64xf32>)
outs(%init:tensor<64xf32>)
- (%lhs_elem: f32, %rhs_elem: f32) {
+ (%lhs_elem: f32, %rhs_elem: f32, %out: f32) {
%0 = arith.addf %lhs_elem, %rhs_elem: f32
linalg.yield %0: f32
}
@@ -378,7 +378,7 @@ func.func @map_binary_memref(%lhs: memref<64xf32>, %rhs: memref<64xf32>,
linalg.map
ins(%lhs, %rhs: memref<64xf32>, memref<64xf32>)
outs(%init:memref<64xf32>)
- (%lhs_elem: f32, %rhs_elem: f32) {
+ (%lhs_elem: f32, %rhs_elem: f32, %out: f32) {
%0 = arith.addf %lhs_elem, %rhs_elem: f32
linalg.yield %0: f32
}
@@ -393,7 +393,7 @@ func.func @map_unary(%input: tensor<64xf32>, %init: tensor<64xf32>) -> tensor<64
%abs = linalg.map
ins(%input:tensor<64xf32>)
outs(%init:tensor<64xf32>)
- (%input_elem: f32) {
+ (%input_elem: f32, %out: f32) {
%0 = math.absf %input_elem: f32
linalg.yield %0: f32
}
@@ -408,7 +408,7 @@ func.func @map_unary_memref(%input: memref<64xf32>, %init: memref<64xf32>) {
linalg.map
ins(%input:memref<64xf32>)
outs(%init:memref<64xf32>)
- (%input_elem: f32) {
+ (%input_elem: f32, %out: f32) {
%0 = math.absf %input_elem: f32
linalg.yield %0: f32
}
@@ -604,7 +604,7 @@ func.func @map_arith_with_attr(%lhs: tensor<64xf32>, %rhs: tensor<64xf32>,
%add = linalg.map
ins(%lhs, %rhs: tensor<64xf32>, tensor<64xf32>)
outs(%init:tensor<64xf32>)
- (%lhs_elem: f32, %rhs_elem: f32) {
+ (%lhs_elem: f32, %rhs_elem: f32, %out: f32) {
%0 = arith.addf %lhs_elem, %rhs_elem fastmath<fast> : f32
linalg.yield %0: f32
}
@@ -622,7 +622,7 @@ func.func @map_arith_with_attr(%lhs: tensor<64xf32>, %rhs: tensor<64xf32>,
func.func @map_not_short_form_compatible(%lhs: tensor<1x32xf32>, %rhs: tensor<1x32xf32>, %init: tensor<1x32xf32>) -> tensor<1x32xf32> {
%mapped = linalg.map ins(%lhs, %rhs : tensor<1x32xf32>, tensor<1x32xf32>) outs(%init : tensor<1x32xf32>)
- (%in_1: f32, %in_2: f32) {
+ (%in_1: f32, %in_2: f32, %out: f32) {
%1 = arith.maximumf %in_1, %in_2 : f32
linalg.yield %in_1 : f32
}
@@ -634,7 +634,7 @@ func.func @map_not_short_form_compatible(%lhs: tensor<1x32xf32>, %rhs: tensor<1x
// CHECK-NOT: linalg.map { arith.maximumf } ins(%[[LHS]] : tensor<1x32xf32>
// CHECK: linalg.map ins(%[[LHS]], %[[RHS]] : tensor<1x32xf32>, tensor<1x32xf32>)
// CHECK-SAME: outs(%[[INIT]] : tensor<1x32xf32>)
-// CHECK-NEXT: (%[[IN1:.*]]: f32, %[[IN2:.*]]: f32) {
+// CHECK-NEXT: (%[[IN1:.*]]: f32, %[[IN2:.*]]: f32, %[[OUT:.*]]: f32) {
// CHECK-NEXT: %[[MAX_RESULT:.*]] = arith.maximumf %[[IN1]], %[[IN2]] : f32
// CHECK-NEXT: linalg.yield %[[IN1]] : f32
// CHECK-NEXT: }
diff --git a/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir b/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir
index 93a0336..aa2c1da 100644
--- a/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization/linalg-ops-with-patterns.mlir
@@ -356,7 +356,7 @@ func.func @vectorize_map(%arg0: memref<64xf32>,
%arg1: memref<64xf32>, %arg2: memref<64xf32>) {
linalg.map ins(%arg0, %arg1 : memref<64xf32>, memref<64xf32>)
outs(%arg2 : memref<64xf32>)
- (%in: f32, %in_0: f32) {
+ (%in: f32, %in_0: f32, %out: f32) {
%0 = arith.addf %in, %in_0 : f32
linalg.yield %0 : f32
}
diff --git a/mlir/test/Dialect/Tensor/bufferize.mlir b/mlir/test/Dialect/Tensor/bufferize.mlir
index 296ca02..5eb2360 100644
--- a/mlir/test/Dialect/Tensor/bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/bufferize.mlir
@@ -728,7 +728,7 @@ func.func @tensor.concat_dynamic_nonconcat_dim(%f: tensor<?x?xf32>, %g: tensor<?
// CHECK-DAG: %[[ALLOC:.*]] = memref.alloc(%[[M]], %[[N]]) {{.*}} : memref<?x3x?xf32>
// CHECK: %[[ALLOC_T:.*]] = bufferization.to_tensor %[[ALLOC]]
// CHECK: %[[MAPPED:.*]] = linalg.map outs(%[[ALLOC_T]] : tensor<?x3x?xf32>)
-// CHECK: () {
+// CHECK: (%[[INIT:.*]]: f32) {
// CHECK: linalg.yield %[[F]] : f32
// CHECK: }
// CHECK: return %[[MAPPED]] : tensor<?x3x?xf32>
diff --git a/mlir/test/Interfaces/TilingInterface/lower-to-loops-using-interface.mlir b/mlir/test/Interfaces/TilingInterface/lower-to-loops-using-interface.mlir
index 8cbee3c..aa8882d 100644
--- a/mlir/test/Interfaces/TilingInterface/lower-to-loops-using-interface.mlir
+++ b/mlir/test/Interfaces/TilingInterface/lower-to-loops-using-interface.mlir
@@ -257,10 +257,10 @@ module attributes {transform.with_named_sequence} {
// -----
func.func @map(%lhs: memref<64xf32>,
- %rhs: memref<64xf32>, %out: memref<64xf32>) {
+ %rhs: memref<64xf32>, %init: memref<64xf32>) {
linalg.map ins(%lhs, %rhs : memref<64xf32>, memref<64xf32>)
- outs(%out : memref<64xf32>)
- (%in: f32, %in_0: f32) {
+ outs(%init : memref<64xf32>)
+ (%in: f32, %in_0: f32, %out: f32) {
%0 = arith.addf %in, %in_0 : f32
linalg.yield %0 : f32
}
diff --git a/mlir/test/Transforms/test-legalize-type-conversion.mlir b/mlir/test/Transforms/test-legalize-type-conversion.mlir
index c003f8b..91f83a0 100644
--- a/mlir/test/Transforms/test-legalize-type-conversion.mlir
+++ b/mlir/test/Transforms/test-legalize-type-conversion.mlir
@@ -143,3 +143,25 @@ func.func @test_signature_conversion_no_converter() {
return
}
+// -----
+
+// CHECK-LABEL: func @test_unstructured_cf_conversion(
+// CHECK-SAME: %[[arg0:.*]]: f64, %[[c:.*]]: i1)
+// CHECK: %[[cast1:.*]] = "test.cast"(%[[arg0]]) : (f64) -> f32
+// CHECK: "test.foo"(%[[cast1]])
+// CHECK: cf.br ^[[bb1:.*]](%[[arg0]] : f64)
+// CHECK: ^[[bb1]](%[[arg1:.*]]: f64):
+// CHECK: cf.cond_br %[[c]], ^[[bb1]](%[[arg1]] : f64), ^[[bb2:.*]](%[[arg1]] : f64)
+// CHECK: ^[[bb2]](%[[arg2:.*]]: f64):
+// CHECK: %[[cast2:.*]] = "test.cast"(%[[arg2]]) : (f64) -> f32
+// CHECK: "test.bar"(%[[cast2]])
+// CHECK: return
+func.func @test_unstructured_cf_conversion(%arg0: f32, %c: i1) {
+ "test.foo"(%arg0) : (f32) -> ()
+ cf.br ^bb1(%arg0: f32)
+^bb1(%arg1: f32):
+ cf.cond_br %c, ^bb1(%arg1 : f32), ^bb2(%arg1 : f32)
+^bb2(%arg2: f32):
+ "test.bar"(%arg2) : (f32) -> ()
+ return
+}
diff --git a/mlir/test/lib/Dialect/Test/CMakeLists.txt b/mlir/test/lib/Dialect/Test/CMakeLists.txt
index f099d01..9354a85 100644
--- a/mlir/test/lib/Dialect/Test/CMakeLists.txt
+++ b/mlir/test/lib/Dialect/Test/CMakeLists.txt
@@ -71,6 +71,7 @@ add_mlir_library(MLIRTestDialect
)
mlir_target_link_libraries(MLIRTestDialect PUBLIC
MLIRControlFlowInterfaces
+ MLIRControlFlowTransforms
MLIRDataLayoutInterfaces
MLIRDerivedAttributeOpInterface
MLIRDestinationStyleOpInterface
diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
index efbdbfb..fd2b943 100644
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -11,6 +11,7 @@
#include "TestTypes.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/CommonFolders.h"
+#include "mlir/Dialect/ControlFlow/Transforms/StructuralTypeConversions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Func/Transforms/FuncConversions.h"
#include "mlir/Dialect/SCF/Transforms/Patterns.h"
@@ -2042,6 +2043,10 @@ struct TestTypeConversionDriver
});
converter.addConversion([](IndexType type) { return type; });
converter.addConversion([](IntegerType type, SmallVectorImpl<Type> &types) {
+ if (type.isInteger(1)) {
+ // i1 is legal.
+ types.push_back(type);
+ }
if (type.isInteger(38)) {
// i38 is legal.
types.push_back(type);
@@ -2175,6 +2180,8 @@ struct TestTypeConversionDriver
converter);
mlir::scf::populateSCFStructuralTypeConversionsAndLegality(
converter, patterns, target);
+ mlir::cf::populateCFStructuralTypeConversionsAndLegality(converter,
+ patterns, target);
ConversionConfig config;
config.allowPatternRollback = allowPatternRollback;
diff --git a/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.cpp b/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.cpp
index 496f18b..61db9d2 100644
--- a/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.cpp
+++ b/mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.cpp
@@ -797,7 +797,7 @@ DiagnosedSilenceableFailure mlir::test::TestProduceInvalidIR::applyToOne(
// Provide some IR that does not verify.
rewriter.setInsertionPointToStart(&target->getRegion(0).front());
TestDummyPayloadOp::create(rewriter, target->getLoc(), TypeRange(),
- ValueRange(), /*failToVerify=*/true);
+ ValueRange(), /*fail_to_verify=*/true);
return DiagnosedSilenceableFailure::success();
}
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index e8561cc..101dfb7 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -4681,6 +4681,8 @@ cc_library(
":ControlFlowDialect",
":IR",
":MemRefDialect",
+ ":Pass",
+ ":TransformUtils",
],
)
@@ -7898,6 +7900,7 @@ cc_library(
deps = [
":AffineDialect",
":AffineToStandard",
+ ":Analysis",
":ArithDialect",
":ComplexDialect",
":ConversionPassIncGen",
diff --git a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
index 778f0be..aa61da4 100644
--- a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
@@ -386,6 +386,7 @@ cc_library(
"//mlir:CallOpInterfaces",
"//mlir:CommonFolders",
"//mlir:ControlFlowInterfaces",
+ "//mlir:ControlFlowTransforms",
"//mlir:DLTIDialect",
"//mlir:DataLayoutInterfaces",
"//mlir:DerivedAttributeOpInterface",